/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
/*(DEBLOBBED)*/


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
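
/* tg3_flag(tp, FOO) expands to an atomic test_bit() of TG3_FLAG_FOO in
 * the tp->tg3_flags bitmap (likewise set/clear), so feature flags can
 * be tested and updated without holding tp->lock.
 */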

#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place these ring-size constants into the tp struct itself;
 * we really want to expose them to GCC so that modulo et al.
 * operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
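/* TG3_TX_RING_SIZE is a power of two, so NEXT_TX() advances a ring
 * index with a mask rather than a '%': e.g. NEXT_TX(511) == 0.
 */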

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
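/* Note: when ASF management firmware is enabled it appears to claim MAC
 * address slot 1 for itself, so the host gets one fewer unicast filter
 * slot and the first usable index shifts from 1 to 2.
 */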

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
#define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
/*(DEBLOBBED)*/

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
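/* The string order above must stay in sync with struct tg3_hw_stats and
 * tg3_get_estats(); ethtool associates values with these names purely
 * by position.
 */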

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

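/* Indirect register access: the register offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data moves
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two config
 * cycles so concurrent accesses cannot interleave.
 */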
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

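/* Read back after the write to force the posted PCI write to complete
 * before returning.
 */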
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
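
/* tw32()/tr32() and the mailbox helpers dispatch through per-device
 * function pointers, so the same call sites work whether the chip
 * needs direct MMIO, indirect config-space access, or an explicit
 * flush of posted writes.
 */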

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

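/* APE locks use a request/grant handshake: the requester sets its bit
 * in the REQ register, then polls the GRANT register until only its
 * own bit shows up.  A timed-out request is withdrawn by writing the
 * bit back to the GRANT register.
 */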
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

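/* Pull data out of the APE scratchpad through its shared-memory message
 * buffer: write the base offset and length into the first two words,
 * raise a scratchpad-read driver event, then copy the reply out of the
 * message area one 32-bit word at a time once the APE signals done.
 */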
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Skip the heartbeat if the hb interval hasn't elapsed yet */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

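/* Writing 1 to an interrupt mailbox masks that vector; writing the
 * last seen status block tag (shifted into bits 31:24) acknowledges
 * the work covered by that tag and unmasks the vector again.
 */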
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

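/* PHY registers are reached through the MAC's MII management interface:
 * the PHY address, register number and command are packed into one
 * MAC_MI_COM frame, MI_COM_START kicks the transaction, and MI_COM_BUSY
 * is polled until it completes.  Hardware auto-polling of the PHY must
 * be paused around manual accesses so the two cannot collide.
 */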
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

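/* Clause 45 PHY registers are reached through the clause 22 MMD
 * indirection: write the device address to MMD_CTRL and the register
 * address to MMD_ADDRESS, switch MMD_CTRL to no-increment data mode,
 * then transfer the data through MMD_ADDRESS.
 */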
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

1496         if (tg3_flag(tp, 5717_PLUS)) {
1497                 u32 is_serdes;
1498
1499                 tp->phy_addr = tp->pci_fn + 1;
1500
1501                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1503                 else
1504                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1506                 if (is_serdes)
1507                         tp->phy_addr += 7;
1508         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1509                 int addr;
1510
1511                 addr = ssb_gige_get_phyaddr(tp->pdev);
1512                 if (addr < 0)
1513                         return addr;
1514                 tp->phy_addr = addr;
1515         } else
1516                 tp->phy_addr = TG3_PHY_MII_ADDR;
1517
1518         tg3_mdio_start(tp);
1519
1520         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1521                 return 0;
1522
1523         tp->mdio_bus = mdiobus_alloc();
1524         if (tp->mdio_bus == NULL)
1525                 return -ENOMEM;
1526
1527         tp->mdio_bus->name     = "tg3 mdio bus";
1528         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1529                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1530         tp->mdio_bus->priv     = tp;
1531         tp->mdio_bus->parent   = &tp->pdev->dev;
1532         tp->mdio_bus->read     = &tg3_mdio_read;
1533         tp->mdio_bus->write    = &tg3_mdio_write;
1534         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1535
1536         /* The bus registration will look for all the PHYs on the mdio bus.
1537          * Unfortunately, it does not ensure the PHY is powered up before
1538          * accessing the PHY ID registers.  A chip reset is the
1539          * quickest way to bring the device back to an operational state.
1540          */
1541         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1542                 tg3_bmcr_reset(tp);
1543
1544         i = mdiobus_register(tp->mdio_bus);
1545         if (i) {
1546                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1547                 mdiobus_free(tp->mdio_bus);
1548                 return i;
1549         }
1550
1551         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1552
1553         if (!phydev || !phydev->drv) {
1554                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1555                 mdiobus_unregister(tp->mdio_bus);
1556                 mdiobus_free(tp->mdio_bus);
1557                 return -ENODEV;
1558         }
1559
1560         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1561         case PHY_ID_BCM57780:
1562                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1563                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1564                 break;
1565         case PHY_ID_BCM50610:
1566         case PHY_ID_BCM50610M:
1567                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1568                                      PHY_BRCM_RX_REFCLK_UNUSED |
1569                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1570                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1571                 fallthrough;
1572         case PHY_ID_RTL8211C:
1573                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1574                 break;
1575         case PHY_ID_RTL8201E:
1576         case PHY_ID_BCMAC131:
1577                 phydev->interface = PHY_INTERFACE_MODE_MII;
1578                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1580                 break;
1581         }
1582
1583         tg3_flag_set(tp, MDIOBUS_INITED);
1584
1585         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586                 tg3_mdio_config_5785(tp);
1587
1588         return 0;
1589 }
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593         if (tg3_flag(tp, MDIOBUS_INITED)) {
1594                 tg3_flag_clear(tp, MDIOBUS_INITED);
1595                 mdiobus_unregister(tp->mdio_bus);
1596                 mdiobus_free(tp->mdio_bus);
1597         }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603         u32 val;
1604
1605         val = tr32(GRC_RX_CPU_EVENT);
1606         val |= GRC_RX_CPU_DRIVER_EVENT;
1607         tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609         tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 {
1617         int i;
1618         unsigned int delay_cnt;
1619         long time_remain;
1620
1621         /* If enough time has passed, no wait is necessary. */
1622         time_remain = (long)(tp->last_event_jiffies + 1 +
1623                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1624                       (long)jiffies;
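        /* (The signed subtraction above is wraparound-safe, in the
         * same spirit as the time_after()/time_before() macros.)
         */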
1625         if (time_remain < 0)
1626                 return;
1627
1628         /* Check if we can shorten the wait time. */
1629         delay_cnt = jiffies_to_usecs(time_remain);
1630         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1632         delay_cnt = (delay_cnt >> 3) + 1;
1633
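        /* Each pass of the loop below polls once and then waits 8 usec,
         * so convert the remaining time into 8 usec slots, rounding up:
         * e.g. a full 2500 usec budget gives (2500 >> 3) + 1 = 313
         * iterations.
         */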
1634         for (i = 0; i < delay_cnt; i++) {
1635                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1636                         break;
1637                 if (pci_channel_offline(tp->pdev))
1638                         break;
1639
1640                 udelay(8);
1641         }
1642 }
1643
1644 /* tp->lock is held. */
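/* Pack four u32 words for the UMP link report: each word carries two
 * 16-bit MII registers, the first in the high half and the second in
 * the low half (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000), with
 * PHYADDR alone in the high half of the final word.
 */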
1645 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1646 {
1647         u32 reg, val;
1648
1649         val = 0;
1650         if (!tg3_readphy(tp, MII_BMCR, &reg))
1651                 val = reg << 16;
1652         if (!tg3_readphy(tp, MII_BMSR, &reg))
1653                 val |= (reg & 0xffff);
1654         *data++ = val;
1655
1656         val = 0;
1657         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1658                 val = reg << 16;
1659         if (!tg3_readphy(tp, MII_LPA, &reg))
1660                 val |= (reg & 0xffff);
1661         *data++ = val;
1662
1663         val = 0;
1664         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1665                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1666                         val = reg << 16;
1667                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1668                         val |= (reg & 0xffff);
1669         }
1670         *data++ = val;
1671
1672         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1673                 val = reg << 16;
1674         else
1675                 val = 0;
1676         *data++ = val;
1677 }
1678
1679 /* tp->lock is held. */
1680 static void tg3_ump_link_report(struct tg3 *tp)
1681 {
1682         u32 data[4];
1683
1684         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685                 return;
1686
1687         tg3_phy_gather_ump_data(tp, data);
1688
1689         tg3_wait_for_event_ack(tp);
1690
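        /* Mailbox handshake: write the command word, then the payload
         * length, then the payload itself; the firmware event raised
         * below tells the RX CPU to come and read it.
         */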
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1697
1698         tg3_generate_fw_event(tp);
1699 }
1700
1701 /* tp->lock is held. */
1702 static void tg3_stop_fw(struct tg3 *tp)
1703 {
1704         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1705                 /* Wait for RX cpu to ACK the previous event. */
1706                 tg3_wait_for_event_ack(tp);
1707
1708                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1709
1710                 tg3_generate_fw_event(tp);
1711
1712                 /* Wait for RX cpu to ACK this event. */
1713                 tg3_wait_for_event_ack(tp);
1714         }
1715 }
1716
1717 /* tp->lock is held. */
1718 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1719 {
1720         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1721                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1722
1723         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1724                 switch (kind) {
1725                 case RESET_KIND_INIT:
1726                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1727                                       DRV_STATE_START);
1728                         break;
1729
1730                 case RESET_KIND_SHUTDOWN:
1731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1732                                       DRV_STATE_UNLOAD);
1733                         break;
1734
1735                 case RESET_KIND_SUSPEND:
1736                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1737                                       DRV_STATE_SUSPEND);
1738                         break;
1739
1740                 default:
1741                         break;
1742                 }
1743         }
1744 }
1745
1746 /* tp->lock is held. */
1747 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1748 {
1749         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1750                 switch (kind) {
1751                 case RESET_KIND_INIT:
1752                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753                                       DRV_STATE_START_DONE);
1754                         break;
1755
1756                 case RESET_KIND_SHUTDOWN:
1757                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758                                       DRV_STATE_UNLOAD_DONE);
1759                         break;
1760
1761                 default:
1762                         break;
1763                 }
1764         }
1765 }
1766
1767 /* tp->lock is held. */
1768 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1769 {
1770         if (tg3_flag(tp, ENABLE_ASF)) {
1771                 switch (kind) {
1772                 case RESET_KIND_INIT:
1773                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774                                       DRV_STATE_START);
1775                         break;
1776
1777                 case RESET_KIND_SHUTDOWN:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_UNLOAD);
1780                         break;
1781
1782                 case RESET_KIND_SUSPEND:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_SUSPEND);
1785                         break;
1786
1787                 default:
1788                         break;
1789                 }
1790         }
1791 }
1792
1793 static int tg3_poll_fw(struct tg3 *tp)
1794 {
1795         int i;
1796         u32 val;
1797
1798         if (tg3_flag(tp, NO_FWARE_REPORTED))
1799                 return 0;
1800
1801         if (tg3_flag(tp, IS_SSB_CORE)) {
1802                 /* We don't use firmware. */
1803                 return 0;
1804         }
1805
1806         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1807                 /* Wait up to 20ms for init done. */
1808                 for (i = 0; i < 200; i++) {
1809                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1810                                 return 0;
1811                         if (pci_channel_offline(tp->pdev))
1812                                 return -ENODEV;
1813
1814                         udelay(100);
1815                 }
1816                 return -ENODEV;
1817         }
1818
1819         /* Wait for firmware initialization to complete. */
1820         for (i = 0; i < 100000; i++) {
1821                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1822                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1823                         break;
1824                 if (pci_channel_offline(tp->pdev)) {
1825                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1826                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1827                                 netdev_info(tp->dev, "No firmware running\n");
1828                         }
1829
1830                         break;
1831                 }
1832
1833                 udelay(10);
1834         }
1835
1836         /* Chip might not be fitted with firmware.  Some Sun onboard
1837          * parts are configured like that.  So don't signal the timeout
1838          * of the above loop as an error, but do report the lack of
1839          * running firmware once.
1840          */
1841         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1842                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1843
1844                 netdev_info(tp->dev, "No firmware running\n");
1845         }
1846
1847         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1848                 /* The 57765 A0 needs a little more
1849                  * time to finish its internal initialization.
1850                  */
1851                 mdelay(10);
1852         }
1853
1854         return 0;
1855 }
1856
1857 static void tg3_link_report(struct tg3 *tp)
1858 {
1859         if (!netif_carrier_ok(tp->dev)) {
1860                 netif_info(tp, link, tp->dev, "Link is down\n");
1861                 tg3_ump_link_report(tp);
1862         } else if (netif_msg_link(tp)) {
1863                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1864                             (tp->link_config.active_speed == SPEED_1000 ?
1865                              1000 :
1866                              (tp->link_config.active_speed == SPEED_100 ?
1867                               100 : 10)),
1868                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1869                              "full" : "half"));
1870
1871                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1872                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1873                             "on" : "off",
1874                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1875                             "on" : "off");
1876
1877                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1878                         netdev_info(tp->dev, "EEE is %s\n",
1879                                     tp->setlpicnt ? "enabled" : "disabled");
1880
1881                 tg3_ump_link_report(tp);
1882         }
1883
1884         tp->link_up = netif_carrier_ok(tp->dev);
1885 }
1886
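/* The helpers below translate between the ethtool FLOW_CTRL_{TX,RX}
 * bits and the MII pause advertisement encodings (PAUSE/ASYM_PAUSE
 * for copper, 1000XPAUSE/1000XPSE_ASYM for 1000BASE-X serdes), in
 * line with the usual IEEE 802.3 pause resolution rules.
 */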
1887 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1888 {
1889         u32 flowctrl = 0;
1890
1891         if (adv & ADVERTISE_PAUSE_CAP) {
1892                 flowctrl |= FLOW_CTRL_RX;
1893                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1894                         flowctrl |= FLOW_CTRL_TX;
1895         } else if (adv & ADVERTISE_PAUSE_ASYM)
1896                 flowctrl |= FLOW_CTRL_TX;
1897
1898         return flowctrl;
1899 }
1900
1901 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1902 {
1903         u16 miireg;
1904
1905         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1906                 miireg = ADVERTISE_1000XPAUSE;
1907         else if (flow_ctrl & FLOW_CTRL_TX)
1908                 miireg = ADVERTISE_1000XPSE_ASYM;
1909         else if (flow_ctrl & FLOW_CTRL_RX)
1910                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1911         else
1912                 miireg = 0;
1913
1914         return miireg;
1915 }
1916
1917 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1918 {
1919         u32 flowctrl = 0;
1920
1921         if (adv & ADVERTISE_1000XPAUSE) {
1922                 flowctrl |= FLOW_CTRL_RX;
1923                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1924                         flowctrl |= FLOW_CTRL_TX;
1925         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1926                 flowctrl |= FLOW_CTRL_TX;
1927
1928         return flowctrl;
1929 }
1930
1931 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1932 {
1933         u8 cap = 0;
1934
1935         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1936                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1937         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1938                 if (lcladv & ADVERTISE_1000XPAUSE)
1939                         cap = FLOW_CTRL_RX;
1940                 if (rmtadv & ADVERTISE_1000XPAUSE)
1941                         cap = FLOW_CTRL_TX;
1942         }
1943
1944         return cap;
1945 }
1946
1947 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1948 {
1949         u8 autoneg;
1950         u8 flowctrl = 0;
1951         u32 old_rx_mode = tp->rx_mode;
1952         u32 old_tx_mode = tp->tx_mode;
1953
1954         if (tg3_flag(tp, USE_PHYLIB))
1955                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1956         else
1957                 autoneg = tp->link_config.autoneg;
1958
1959         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1960                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1961                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1962                 else
1963                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1964         } else
1965                 flowctrl = tp->link_config.flowctrl;
1966
1967         tp->link_config.active_flowctrl = flowctrl;
1968
1969         if (flowctrl & FLOW_CTRL_RX)
1970                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1971         else
1972                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1973
1974         if (old_rx_mode != tp->rx_mode)
1975                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1976
1977         if (flowctrl & FLOW_CTRL_TX)
1978                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1979         else
1980                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1981
1982         if (old_tx_mode != tp->tx_mode)
1983                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1984 }
1985
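/* phylib link-change callback, as passed to phy_connect() in
 * tg3_phy_init(): recompute the MAC port mode, duplex and flow
 * control from the phydev state under tp->lock, and emit a link
 * report only when something user-visible actually changed.
 */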
1986 static void tg3_adjust_link(struct net_device *dev)
1987 {
1988         u8 oldflowctrl, linkmesg = 0;
1989         u32 mac_mode, lcl_adv, rmt_adv;
1990         struct tg3 *tp = netdev_priv(dev);
1991         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1992
1993         spin_lock_bh(&tp->lock);
1994
1995         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1996                                     MAC_MODE_HALF_DUPLEX);
1997
1998         oldflowctrl = tp->link_config.active_flowctrl;
1999
2000         if (phydev->link) {
2001                 lcl_adv = 0;
2002                 rmt_adv = 0;
2003
2004                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2005                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2006                 else if (phydev->speed == SPEED_1000 ||
2007                          tg3_asic_rev(tp) != ASIC_REV_5785)
2008                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2009                 else
2010                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2011
2012                 if (phydev->duplex == DUPLEX_HALF)
2013                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2014                 else {
2015                         lcl_adv = mii_advertise_flowctrl(
2016                                   tp->link_config.flowctrl);
2017
2018                         if (phydev->pause)
2019                                 rmt_adv = LPA_PAUSE_CAP;
2020                         if (phydev->asym_pause)
2021                                 rmt_adv |= LPA_PAUSE_ASYM;
2022                 }
2023
2024                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2025         } else
2026                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2027
2028         if (mac_mode != tp->mac_mode) {
2029                 tp->mac_mode = mac_mode;
2030                 tw32_f(MAC_MODE, tp->mac_mode);
2031                 udelay(40);
2032         }
2033
2034         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2035                 if (phydev->speed == SPEED_10)
2036                         tw32(MAC_MI_STAT,
2037                              MAC_MI_STAT_10MBPS_MODE |
2038                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2039                 else
2040                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2041         }
2042
2043         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2044                 tw32(MAC_TX_LENGTHS,
2045                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2046                       (6 << TX_LENGTHS_IPG_SHIFT) |
2047                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2048         else
2049                 tw32(MAC_TX_LENGTHS,
2050                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2051                       (6 << TX_LENGTHS_IPG_SHIFT) |
2052                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2053
2054         if (phydev->link != tp->old_link ||
2055             phydev->speed != tp->link_config.active_speed ||
2056             phydev->duplex != tp->link_config.active_duplex ||
2057             oldflowctrl != tp->link_config.active_flowctrl)
2058                 linkmesg = 1;
2059
2060         tp->old_link = phydev->link;
2061         tp->link_config.active_speed = phydev->speed;
2062         tp->link_config.active_duplex = phydev->duplex;
2063
2064         spin_unlock_bh(&tp->lock);
2065
2066         if (linkmesg)
2067                 tg3_link_report(tp);
2068 }
2069
2070 static int tg3_phy_init(struct tg3 *tp)
2071 {
2072         struct phy_device *phydev;
2073
2074         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2075                 return 0;
2076
2077         /* Bring the PHY back to a known state. */
2078         tg3_bmcr_reset(tp);
2079
2080         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2081
2082         /* Attach the MAC to the PHY. */
2083         phydev = phy_connect(tp->dev, phydev_name(phydev),
2084                              tg3_adjust_link, phydev->interface);
2085         if (IS_ERR(phydev)) {
2086                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2087                 return PTR_ERR(phydev);
2088         }
2089
2090         /* Mask with MAC supported features. */
2091         switch (phydev->interface) {
2092         case PHY_INTERFACE_MODE_GMII:
2093         case PHY_INTERFACE_MODE_RGMII:
2094                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2095                         phy_set_max_speed(phydev, SPEED_1000);
2096                         phy_support_asym_pause(phydev);
2097                         break;
2098                 }
2099                 fallthrough;
2100         case PHY_INTERFACE_MODE_MII:
2101                 phy_set_max_speed(phydev, SPEED_100);
2102                 phy_support_asym_pause(phydev);
2103                 break;
2104         default:
2105                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2106                 return -EINVAL;
2107         }
2108
2109         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2110
2111         phy_attached_info(phydev);
2112
2113         return 0;
2114 }
2115
2116 static void tg3_phy_start(struct tg3 *tp)
2117 {
2118         struct phy_device *phydev;
2119
2120         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2121                 return;
2122
2123         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2124
2125         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2126                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2127                 phydev->speed = tp->link_config.speed;
2128                 phydev->duplex = tp->link_config.duplex;
2129                 phydev->autoneg = tp->link_config.autoneg;
2130                 ethtool_convert_legacy_u32_to_link_mode(
2131                         phydev->advertising, tp->link_config.advertising);
2132         }
2133
2134         phy_start(phydev);
2135
2136         phy_start_aneg(phydev);
2137 }
2138
2139 static void tg3_phy_stop(struct tg3 *tp)
2140 {
2141         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142                 return;
2143
2144         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2145 }
2146
2147 static void tg3_phy_fini(struct tg3 *tp)
2148 {
2149         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2150                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2151                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2152         }
2153 }
2154
2155 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2156 {
2157         int err;
2158         u32 val;
2159
2160         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2161                 return 0;
2162
2163         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2164                 /* Cannot do read-modify-write on 5401 */
2165                 err = tg3_phy_auxctl_write(tp,
2166                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2167                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2168                                            0x4c20);
2169                 goto done;
2170         }
2171
2172         err = tg3_phy_auxctl_read(tp,
2173                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2174         if (err)
2175                 return err;
2176
2177         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2178         err = tg3_phy_auxctl_write(tp,
2179                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2180
2181 done:
2182         return err;
2183 }
2184
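/* FET PHYs keep the APD control in a shadow register bank: setting
 * MII_TG3_FET_SHADOW_EN in the test register exposes the shadow
 * registers, and restoring the original test-register value at the
 * end returns the PHY to normal register access.
 */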
2185 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2186 {
2187         u32 phytest;
2188
2189         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2190                 u32 phy;
2191
2192                 tg3_writephy(tp, MII_TG3_FET_TEST,
2193                              phytest | MII_TG3_FET_SHADOW_EN);
2194                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2195                         if (enable)
2196                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2197                         else
2198                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2200                 }
2201                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2202         }
2203 }
2204
2205 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207         u32 reg;
2208
2209         if (!tg3_flag(tp, 5705_PLUS) ||
2210             (tg3_flag(tp, 5717_PLUS) &&
2211              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2212                 return;
2213
2214         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2215                 tg3_phy_fet_toggle_apd(tp, enable);
2216                 return;
2217         }
2218
2219         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2220               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2221               MII_TG3_MISC_SHDW_SCR5_SDTL |
2222               MII_TG3_MISC_SHDW_SCR5_C125OE;
2223         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2224                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2225
2226         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2227
2229         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2230         if (enable)
2231                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2232
2233         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2234 }
2235
2236 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2237 {
2238         u32 phy;
2239
2240         if (!tg3_flag(tp, 5705_PLUS) ||
2241             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2242                 return;
2243
2244         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2245                 u32 ephy;
2246
2247                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2248                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2249
2250                         tg3_writephy(tp, MII_TG3_FET_TEST,
2251                                      ephy | MII_TG3_FET_SHADOW_EN);
2252                         if (!tg3_readphy(tp, reg, &phy)) {
2253                                 if (enable)
2254                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2255                                 else
2256                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2257                                 tg3_writephy(tp, reg, phy);
2258                         }
2259                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2260                 }
2261         } else {
2262                 int ret;
2263
2264                 ret = tg3_phy_auxctl_read(tp,
2265                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2266                 if (!ret) {
2267                         if (enable)
2268                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2269                         else
2270                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2271                         tg3_phy_auxctl_write(tp,
2272                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2273                 }
2274         }
2275 }
2276
2277 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2278 {
2279         int ret;
2280         u32 val;
2281
2282         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2283                 return;
2284
2285         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2286         if (!ret)
2287                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2288                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2289 }
2290
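/* Apply per-chip calibration from one-time-programmable memory:
 * each TG3_OTP_* field is extracted from tp->phy_otp and written to
 * its matching PHY DSP coefficient register, with the whole sequence
 * bracketed by the auxctl SMDSP enable/disable toggle.
 */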
2291 static void tg3_phy_apply_otp(struct tg3 *tp)
2292 {
2293         u32 otp, phy;
2294
2295         if (!tp->phy_otp)
2296                 return;
2297
2298         otp = tp->phy_otp;
2299
2300         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2301                 return;
2302
2303         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2304         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2305         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2306
2307         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2308               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2309         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2310
2311         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2312         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2313         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2314
2315         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2316         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2317
2318         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2319         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2320
2321         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2322               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2323         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2324
2325         tg3_phy_toggle_auxctl_smdsp(tp, false);
2326 }
2327
2328 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2329 {
2330         u32 val;
2331         struct ethtool_eee *dest = &tp->eee;
2332
2333         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2334                 return;
2335
2336         if (eee)
2337                 dest = eee;
2338
2339         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2340                 return;
2341
2342         /* Pull eee_active */
2343         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2344             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2345                 dest->eee_active = 1;
2346         } else
2347                 dest->eee_active = 0;
2348
2349         /* Pull lp advertised settings */
2350         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2351                 return;
2352         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2353
2354         /* Pull advertised and eee_enabled settings */
2355         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2356                 return;
2357         dest->eee_enabled = !!val;
2358         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359
2360         /* Pull tx_lpi_enabled */
2361         val = tr32(TG3_CPMU_EEE_MODE);
2362         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2363
2364         /* Pull lpi timer value */
2365         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2366 }
2367
2368 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2369 {
2370         u32 val;
2371
2372         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2373                 return;
2374
2375         tp->setlpicnt = 0;
2376
2377         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2378             current_link_up &&
2379             tp->link_config.active_duplex == DUPLEX_FULL &&
2380             (tp->link_config.active_speed == SPEED_100 ||
2381              tp->link_config.active_speed == SPEED_1000)) {
2382                 u32 eeectl;
2383
2384                 if (tp->link_config.active_speed == SPEED_1000)
2385                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2386                 else
2387                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2388
2389                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2390
2391                 tg3_eee_pull_config(tp, NULL);
2392                 if (tp->eee.eee_active)
2393                         tp->setlpicnt = 2;
2394         }
2395
2396         if (!tp->setlpicnt) {
2397                 if (current_link_up &&
2398                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2399                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2400                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2401                 }
2402
2403                 val = tr32(TG3_CPMU_EEE_MODE);
2404                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2405         }
2406 }
2407
2408 static void tg3_phy_eee_enable(struct tg3 *tp)
2409 {
2410         u32 val;
2411
2412         if (tp->link_config.active_speed == SPEED_1000 &&
2413             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2414              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2415              tg3_flag(tp, 57765_CLASS)) &&
2416             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417                 val = MII_TG3_DSP_TAP26_ALNOKO |
2418                       MII_TG3_DSP_TAP26_RMRXSTO;
2419                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2420                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2421         }
2422
2423         val = tr32(TG3_CPMU_EEE_MODE);
2424         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2425 }
2426
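/* Poll the DSP control register until the macro-busy bit (0x1000)
 * clears, giving up after 100 reads.
 */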
2427 static int tg3_wait_macro_done(struct tg3 *tp)
2428 {
2429         int limit = 100;
2430
2431         while (limit--) {
2432                 u32 tmp32;
2433
2434                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2435                         if ((tmp32 & 0x1000) == 0)
2436                                 break;
2437                 }
2438         }
2439         if (limit < 0)
2440                 return -EBUSY;
2441
2442         return 0;
2443 }
2444
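/* Write a known test pattern into each of the four DSP channels
 * (spaced 0x2000 apart in the DSP address map), read it back, and
 * request another PHY reset via *resetp if any step fails to verify.
 */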
2445 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2446 {
2447         static const u32 test_pat[4][6] = {
2448         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2449         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2450         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2451         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2452         };
2453         int chan;
2454
2455         for (chan = 0; chan < 4; chan++) {
2456                 int i;
2457
2458                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2459                              (chan * 0x2000) | 0x0200);
2460                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2461
2462                 for (i = 0; i < 6; i++)
2463                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2464                                      test_pat[chan][i]);
2465
2466                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2467                 if (tg3_wait_macro_done(tp)) {
2468                         *resetp = 1;
2469                         return -EBUSY;
2470                 }
2471
2472                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473                              (chan * 0x2000) | 0x0200);
2474                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2475                 if (tg3_wait_macro_done(tp)) {
2476                         *resetp = 1;
2477                         return -EBUSY;
2478                 }
2479
2480                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2481                 if (tg3_wait_macro_done(tp)) {
2482                         *resetp = 1;
2483                         return -EBUSY;
2484                 }
2485
2486                 for (i = 0; i < 6; i += 2) {
2487                         u32 low, high;
2488
2489                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2490                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2491                             tg3_wait_macro_done(tp)) {
2492                                 *resetp = 1;
2493                                 return -EBUSY;
2494                         }
2495                         low &= 0x7fff;
2496                         high &= 0x000f;
2497                         if (low != test_pat[chan][i] ||
2498                             high != test_pat[chan][i+1]) {
2499                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2500                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2501                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2502
2503                                 return -EBUSY;
2504                         }
2505                 }
2506         }
2507
2508         return 0;
2509 }
2510
2511 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2512 {
2513         int chan;
2514
2515         for (chan = 0; chan < 4; chan++) {
2516                 int i;
2517
2518                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2519                              (chan * 0x2000) | 0x0200);
2520                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2521                 for (i = 0; i < 6; i++)
2522                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2523                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2524                 if (tg3_wait_macro_done(tp))
2525                         return -EBUSY;
2526         }
2527
2528         return 0;
2529 }
2530
2531 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2532 {
2533         u32 reg32, phy9_orig;
2534         int retries, do_phy_reset, err;
2535
2536         retries = 10;
2537         do_phy_reset = 1;
2538         do {
2539                 if (do_phy_reset) {
2540                         err = tg3_bmcr_reset(tp);
2541                         if (err)
2542                                 return err;
2543                         do_phy_reset = 0;
2544                 }
2545
2546                 /* Disable transmitter and interrupt.  */
2547                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2548                         continue;
2549
2550                 reg32 |= 0x3000;
2551                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2552
2553                 /* Set full-duplex, 1000 mbps.  */
2554                 tg3_writephy(tp, MII_BMCR,
2555                              BMCR_FULLDPLX | BMCR_SPEED1000);
2556
2557                 /* Set to master mode.  */
2558                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2559                         continue;
2560
2561                 tg3_writephy(tp, MII_CTRL1000,
2562                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2563
2564                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2565                 if (err)
2566                         return err;
2567
2568                 /* Block the PHY control access.  */
2569                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2570
2571                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2572                 if (!err)
2573                         break;
2574         } while (--retries);
2575
2576         err = tg3_phy_reset_chanpat(tp);
2577         if (err)
2578                 return err;
2579
2580         tg3_phydsp_write(tp, 0x8005, 0x0000);
2581
2582         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2583         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2584
2585         tg3_phy_toggle_auxctl_smdsp(tp, false);
2586
2587         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2588
2589         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2590         if (err)
2591                 return err;
2592
2593         reg32 &= ~0x3000;
2594         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2595
2596         return 0;
2597 }
2598
2599 static void tg3_carrier_off(struct tg3 *tp)
2600 {
2601         netif_carrier_off(tp->dev);
2602         tp->link_up = false;
2603 }
2604
2605 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2606 {
2607         if (tg3_flag(tp, ENABLE_ASF))
2608                 netdev_warn(tp->dev,
2609                             "Management side-band traffic will be interrupted during phy settings change\n");
2610 }
2611
2612 /* Reset the tigon3 PHY and reapply the workarounds and tuning
2613  * that a reset wipes out.
2614  */
2615 static int tg3_phy_reset(struct tg3 *tp)
2616 {
2617         u32 val, cpmuctrl;
2618         int err;
2619
2620         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2621                 val = tr32(GRC_MISC_CFG);
2622                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2623                 udelay(40);
2624         }
2625         err  = tg3_readphy(tp, MII_BMSR, &val);
2626         err |= tg3_readphy(tp, MII_BMSR, &val);
2627         if (err != 0)
2628                 return -EBUSY;
2629
2630         if (netif_running(tp->dev) && tp->link_up) {
2631                 netif_carrier_off(tp->dev);
2632                 tg3_link_report(tp);
2633         }
2634
2635         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2636             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2637             tg3_asic_rev(tp) == ASIC_REV_5705) {
2638                 err = tg3_phy_reset_5703_4_5(tp);
2639                 if (err)
2640                         return err;
2641                 goto out;
2642         }
2643
2644         cpmuctrl = 0;
2645         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2646             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2647                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2648                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2649                         tw32(TG3_CPMU_CTRL,
2650                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2651         }
2652
2653         err = tg3_bmcr_reset(tp);
2654         if (err)
2655                 return err;
2656
2657         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2658                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2659                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2660
2661                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2662         }
2663
2664         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2665             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2666                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2667                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2668                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2669                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2670                         udelay(40);
2671                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2672                 }
2673         }
2674
2675         if (tg3_flag(tp, 5717_PLUS) &&
2676             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2677                 return 0;
2678
2679         tg3_phy_apply_otp(tp);
2680
2681         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2682                 tg3_phy_toggle_apd(tp, true);
2683         else
2684                 tg3_phy_toggle_apd(tp, false);
2685
2686 out:
2687         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2688             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2689                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2690                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2691                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2692         }
2693
2694         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2695                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2696                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2697         }
2698
2699         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2700                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2702                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2703                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2704                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2705                 }
2706         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2707                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2709                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2710                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2711                                 tg3_writephy(tp, MII_TG3_TEST1,
2712                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2713                         } else
2714                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2715
2716                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2717                 }
2718         }
2719
2720         /* Set Extended packet length bit (bit 14) on all chips that
2721          * support jumbo frames. */
2722         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2723                 /* Cannot do read-modify-write on 5401 */
2724                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2725         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2726                 /* Set bit 14 with read-modify-write to preserve other bits */
2727                 err = tg3_phy_auxctl_read(tp,
2728                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2729                 if (!err)
2730                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2731                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2732         }
2733
2734         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2735          * jumbo frames transmission.
2736          */
2737         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2739                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2740                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2741         }
2742
2743         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2744                 /* adjust output voltage */
2745                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2746         }
2747
2748         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2749                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2750
2751         tg3_phy_toggle_automdix(tp, true);
2752         tg3_phy_set_wirespeed(tp);
2753         return 0;
2754 }
2755
2756 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2757 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2758 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2759                                           TG3_GPIO_MSG_NEED_VAUX)
2760 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2761         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2762          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2763          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2764          (TG3_GPIO_MSG_DRVR_PRES << 12))
2765
2766 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2767         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2768          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2769          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2770          (TG3_GPIO_MSG_NEED_VAUX << 12))
2771
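/* Each PCI function owns a four-bit nibble of the GPIO message word:
 * bit 0 of the nibble is "driver present" and bit 1 is "needs Vaux",
 * so e.g. function 2 asserting DRVR_PRES sets bit 8 (0x100).  The
 * _ALL_ masks above cover all four functions at once.
 */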
2772 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2773 {
2774         u32 status, shift;
2775
2776         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2777             tg3_asic_rev(tp) == ASIC_REV_5719)
2778                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2779         else
2780                 status = tr32(TG3_CPMU_DRV_STATUS);
2781
2782         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2783         status &= ~(TG3_GPIO_MSG_MASK << shift);
2784         status |= (newstat << shift);
2785
2786         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2787             tg3_asic_rev(tp) == ASIC_REV_5719)
2788                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2789         else
2790                 tw32(TG3_CPMU_DRV_STATUS, status);
2791
2792         return status >> TG3_APE_GPIO_MSG_SHIFT;
2793 }
2794
2795 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2796 {
2797         if (!tg3_flag(tp, IS_NIC))
2798                 return 0;
2799
2800         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2802             tg3_asic_rev(tp) == ASIC_REV_5720) {
2803                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2804                         return -EIO;
2805
2806                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2807
2808                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2809                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2810
2811                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2812         } else {
2813                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2815         }
2816
2817         return 0;
2818 }
2819
2820 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2821 {
2822         u32 grc_local_ctrl;
2823
2824         if (!tg3_flag(tp, IS_NIC) ||
2825             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2826             tg3_asic_rev(tp) == ASIC_REV_5701)
2827                 return;
2828
2829         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2830
2831         tw32_wait_f(GRC_LOCAL_CTRL,
2832                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2833                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2834
2835         tw32_wait_f(GRC_LOCAL_CTRL,
2836                     grc_local_ctrl,
2837                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2838
2839         tw32_wait_f(GRC_LOCAL_CTRL,
2840                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2841                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843
2844 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2845 {
2846         if (!tg3_flag(tp, IS_NIC))
2847                 return;
2848
2849         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2850             tg3_asic_rev(tp) == ASIC_REV_5701) {
2851                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2852                             (GRC_LCLCTRL_GPIO_OE0 |
2853                              GRC_LCLCTRL_GPIO_OE1 |
2854                              GRC_LCLCTRL_GPIO_OE2 |
2855                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2856                              GRC_LCLCTRL_GPIO_OUTPUT1),
2857                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2858         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2859                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2860                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2861                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2862                                      GRC_LCLCTRL_GPIO_OE1 |
2863                                      GRC_LCLCTRL_GPIO_OE2 |
2864                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2865                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2866                                      tp->grc_local_ctrl;
2867                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2868                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2869
2870                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2871                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2872                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2873
2874                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2875                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2876                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2877         } else {
2878                 u32 no_gpio2;
2879                 u32 grc_local_ctrl = 0;
2880
2881                 /* Workaround to prevent drawing too much current. */
2882                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2883                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2884                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2885                                     grc_local_ctrl,
2886                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2887                 }
2888
2889                 /* On 5753 and variants, GPIO2 cannot be used. */
2890                 no_gpio2 = tp->nic_sram_data_cfg &
2891                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2892
2893                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2894                                   GRC_LCLCTRL_GPIO_OE1 |
2895                                   GRC_LCLCTRL_GPIO_OE2 |
2896                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2897                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2898                 if (no_gpio2) {
2899                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2900                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2901                 }
2902                 tw32_wait_f(GRC_LOCAL_CTRL,
2903                             tp->grc_local_ctrl | grc_local_ctrl,
2904                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2905
2906                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2907
2908                 tw32_wait_f(GRC_LOCAL_CTRL,
2909                             tp->grc_local_ctrl | grc_local_ctrl,
2910                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2911
2912                 if (!no_gpio2) {
2913                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2914                         tw32_wait_f(GRC_LOCAL_CTRL,
2915                                     tp->grc_local_ctrl | grc_local_ctrl,
2916                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2917                 }
2918         }
2919 }
2920
2921 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2922 {
2923         u32 msg = 0;
2924
2925         /* Serialize power state transitions */
2926         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2927                 return;
2928
2929         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2930                 msg = TG3_GPIO_MSG_NEED_VAUX;
2931
2932         msg = tg3_set_function_status(tp, msg);
2933
2934         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2935                 goto done;
2936
2937         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2938                 tg3_pwrsrc_switch_to_vaux(tp);
2939         else
2940                 tg3_pwrsrc_die_with_vmain(tp);
2941
2942 done:
2943         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2944 }
2945
2946 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2947 {
2948         bool need_vaux = false;
2949
2950         /* The GPIOs do something completely different on 57765. */
2951         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2952                 return;
2953
2954         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2955             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2956             tg3_asic_rev(tp) == ASIC_REV_5720) {
2957                 tg3_frob_aux_power_5717(tp, include_wol ?
2958                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2959                 return;
2960         }
2961
2962         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2963                 struct net_device *dev_peer;
2964
2965                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2966
2967                 /* remove_one() may have been run on the peer. */
2968                 if (dev_peer) {
2969                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2970
2971                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2972                                 return;
2973
2974                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2975                             tg3_flag(tp_peer, ENABLE_ASF))
2976                                 need_vaux = true;
2977                 }
2978         }
2979
2980         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2981             tg3_flag(tp, ENABLE_ASF))
2982                 need_vaux = true;
2983
2984         if (need_vaux)
2985                 tg3_pwrsrc_switch_to_vaux(tp);
2986         else
2987                 tg3_pwrsrc_die_with_vmain(tp);
2988 }
2989
2990 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2991 {
2992         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2993                 return 1;
2994         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2995                 if (speed != SPEED_10)
2996                         return 1;
2997         } else if (speed == SPEED_10)
2998                 return 1;
2999
3000         return 0;
3001 }
3002
3003 static bool tg3_phy_power_bug(struct tg3 *tp)
3004 {
3005         switch (tg3_asic_rev(tp)) {
3006         case ASIC_REV_5700:
3007         case ASIC_REV_5704:
3008                 return true;
3009         case ASIC_REV_5780:
3010                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3011                         return true;
3012                 return false;
3013         case ASIC_REV_5717:
3014                 if (!tp->pci_fn)
3015                         return true;
3016                 return false;
3017         case ASIC_REV_5719:
3018         case ASIC_REV_5720:
3019                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3020                     !tp->pci_fn)
3021                         return true;
3022                 return false;
3023         }
3024
3025         return false;
3026 }
3027
3028 static bool tg3_phy_led_bug(struct tg3 *tp)
3029 {
3030         switch (tg3_asic_rev(tp)) {
3031         case ASIC_REV_5719:
3032         case ASIC_REV_5720:
3033                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3034                     !tp->pci_fn)
3035                         return true;
3036                 return false;
3037         }
3038
3039         return false;
3040 }
3041
3042 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3043 {
3044         u32 val;
3045
3046         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3047                 return;
3048
3049         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3050                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3051                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3052                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3053
3054                         sg_dig_ctrl |=
3055                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3056                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3057                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3058                 }
3059                 return;
3060         }
3061
3062         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3063                 tg3_bmcr_reset(tp);
3064                 val = tr32(GRC_MISC_CFG);
3065                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3066                 udelay(40);
3067                 return;
3068         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3069                 u32 phytest;
3070                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3071                         u32 phy;
3072
3073                         tg3_writephy(tp, MII_ADVERTISE, 0);
3074                         tg3_writephy(tp, MII_BMCR,
3075                                      BMCR_ANENABLE | BMCR_ANRESTART);
3076
3077                         tg3_writephy(tp, MII_TG3_FET_TEST,
3078                                      phytest | MII_TG3_FET_SHADOW_EN);
3079                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3080                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3081                                 tg3_writephy(tp,
3082                                              MII_TG3_FET_SHDW_AUXMODE4,
3083                                              phy);
3084                         }
3085                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3086                 }
3087                 return;
3088         } else if (do_low_power) {
3089                 if (!tg3_phy_led_bug(tp))
3090                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3091                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3092
3093                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3094                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3095                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3096                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3097         }
3098
3099         /* The PHY should not be powered down on some chips because
3100          * of bugs.
3101          */
3102         if (tg3_phy_power_bug(tp))
3103                 return;
3104
3105         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3106             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3107                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3108                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3109                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3110                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3111         }
3112
3113         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3114 }
3115
3116 /* tp->lock is held. */
3117 static int tg3_nvram_lock(struct tg3 *tp)
3118 {
3119         if (tg3_flag(tp, NVRAM)) {
3120                 int i;
3121
3122                 if (tp->nvram_lock_cnt == 0) {
3123                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3124                         for (i = 0; i < 8000; i++) {
3125                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3126                                         break;
3127                                 udelay(20);
3128                         }
3129                         if (i == 8000) {
3130                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3131                                 return -ENODEV;
3132                         }
3133                 }
3134                 tp->nvram_lock_cnt++;
3135         }
3136         return 0;
3137 }
3138
3139 /* tp->lock is held. */
3140 static void tg3_nvram_unlock(struct tg3 *tp)
3141 {
3142         if (tg3_flag(tp, NVRAM)) {
3143                 if (tp->nvram_lock_cnt > 0)
3144                         tp->nvram_lock_cnt--;
3145                 if (tp->nvram_lock_cnt == 0)
3146                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3147         }
3148 }
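
/* Usage sketch (illustrative, not part of the driver): the lock is
 * reference counted, so nested acquisition under tp->lock is safe:
 *
 *      if (!tg3_nvram_lock(tp)) {
 *              ...access NVRAM, possibly via helpers that lock again...
 *              tg3_nvram_unlock(tp);
 *      }
 *
 * The hardware arbitration request (NVRAM_SWARB) is only issued when
 * the count goes 0 -> 1, and released only when it drops back to 0.
 */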
3149
3150 /* tp->lock is held. */
3151 static void tg3_enable_nvram_access(struct tg3 *tp)
3152 {
3153         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154                 u32 nvaccess = tr32(NVRAM_ACCESS);
3155
3156                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3157         }
3158 }
3159
3160 /* tp->lock is held. */
3161 static void tg3_disable_nvram_access(struct tg3 *tp)
3162 {
3163         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3164                 u32 nvaccess = tr32(NVRAM_ACCESS);
3165
3166                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3167         }
3168 }
3169
3170 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3171                                         u32 offset, u32 *val)
3172 {
3173         u32 tmp;
3174         int i;
3175
3176         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3177                 return -EINVAL;
3178
3179         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3180                                         EEPROM_ADDR_DEVID_MASK |
3181                                         EEPROM_ADDR_READ);
3182         tw32(GRC_EEPROM_ADDR,
3183              tmp |
3184              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3185              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3186               EEPROM_ADDR_ADDR_MASK) |
3187              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3188
3189         for (i = 0; i < 1000; i++) {
3190                 tmp = tr32(GRC_EEPROM_ADDR);
3191
3192                 if (tmp & EEPROM_ADDR_COMPLETE)
3193                         break;
3194                 msleep(1);
3195         }
3196         if (!(tmp & EEPROM_ADDR_COMPLETE))
3197                 return -EBUSY;
3198
3199         tmp = tr32(GRC_EEPROM_DATA);
3200
3201         /*
3202          * The data will always be opposite the native endian
3203          * format.  Perform a blind byteswap to compensate.
3204          */
3205         *val = swab32(tmp);
3206
3207         return 0;
3208 }
3209
3210 #define NVRAM_CMD_TIMEOUT 10000
3211
3212 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3213 {
3214         int i;
3215
3216         tw32(NVRAM_CMD, nvram_cmd);
3217         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3218                 usleep_range(10, 40);
3219                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3220                         udelay(10);
3221                         break;
3222                 }
3223         }
3224
3225         if (i == NVRAM_CMD_TIMEOUT)
3226                 return -EBUSY;
3227
3228         return 0;
3229 }
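
/* Command sketch (illustrative): a single word read, as issued by
 * tg3_nvram_read() below, looks like:
 *
 *      tw32(NVRAM_ADDR, offset);
 *      ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *                               NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *                               NVRAM_CMD_DONE);
 *      if (!ret)
 *              val = tr32(NVRAM_RDDATA);
 *
 * tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE for up to NVRAM_CMD_TIMEOUT
 * iterations (10-40 us apart) and returns -EBUSY on timeout.
 */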
3230
3231 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3232 {
3233         if (tg3_flag(tp, NVRAM) &&
3234             tg3_flag(tp, NVRAM_BUFFERED) &&
3235             tg3_flag(tp, FLASH) &&
3236             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3237             (tp->nvram_jedecnum == JEDEC_ATMEL))
3238
3239                 addr = ((addr / tp->nvram_pagesize) <<
3240                         ATMEL_AT45DB0X1B_PAGE_POS) +
3241                        (addr % tp->nvram_pagesize);
3242
3243         return addr;
3244 }
3245
3246 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3247 {
3248         if (tg3_flag(tp, NVRAM) &&
3249             tg3_flag(tp, NVRAM_BUFFERED) &&
3250             tg3_flag(tp, FLASH) &&
3251             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252             (tp->nvram_jedecnum == JEDEC_ATMEL))
3253
3254                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3255                         tp->nvram_pagesize) +
3256                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3257
3258         return addr;
3259 }
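
/* Worked example (illustrative): for an Atmel AT45DB0X1B part with a
 * 264-byte page (tp->nvram_pagesize == 264) and a page-position shift
 * of 9 (ATMEL_AT45DB0X1B_PAGE_POS), a linear offset of 1000 maps as:
 *
 *      page   = 1000 / 264 = 3
 *      offset = 1000 % 264 = 208
 *      phys   = (3 << 9) + 208 = 1744
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *      (1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 */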
3260
3261 /* NOTE: Data read in from NVRAM is byteswapped according to
3262  * the byteswapping settings for all other register accesses.
3263  * tg3 devices are BE devices, so on a BE machine, the data
3264  * returned will be exactly as it is seen in NVRAM.  On a LE
3265  * machine, the 32-bit value will be byteswapped.
3266  */
3267 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3268 {
3269         int ret;
3270
3271         if (!tg3_flag(tp, NVRAM))
3272                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3273
3274         offset = tg3_nvram_phys_addr(tp, offset);
3275
3276         if (offset > NVRAM_ADDR_MSK)
3277                 return -EINVAL;
3278
3279         ret = tg3_nvram_lock(tp);
3280         if (ret)
3281                 return ret;
3282
3283         tg3_enable_nvram_access(tp);
3284
3285         tw32(NVRAM_ADDR, offset);
3286         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3287                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3288
3289         if (ret == 0)
3290                 *val = tr32(NVRAM_RDDATA);
3291
3292         tg3_disable_nvram_access(tp);
3293
3294         tg3_nvram_unlock(tp);
3295
3296         return ret;
3297 }
3298
3299 /* Ensures NVRAM data is in bytestream format. */
3300 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3301 {
3302         u32 v;
3303         int res = tg3_nvram_read(tp, offset, &v);
3304         if (!res)
3305                 *val = cpu_to_be32(v);
3306         return res;
3307 }
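
/* Example (illustrative): callers filling a byte buffer use the __be32
 * variant so the bytes land in NVRAM order on any host endianness:
 *
 *      __be32 v;
 *
 *      if (!tg3_nvram_read_be32(tp, offset, &v))
 *              memcpy(buf, &v, sizeof(v));
 *
 * tg3_nvram_read(), by contrast, returns a host-order u32 suited to
 * interpreting individual fields.
 */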
3308
3309 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3310                                     u32 offset, u32 len, u8 *buf)
3311 {
3312         int i, j, rc = 0;
3313         u32 val;
3314
3315         for (i = 0; i < len; i += 4) {
3316                 u32 addr;
3317                 __be32 data;
3318
3319                 addr = offset + i;
3320
3321                 memcpy(&data, buf + i, 4);
3322
3323                 /*
3324                  * The SEEPROM interface expects the data to always be opposite
3325                  * the native endian format.  We accomplish this by reversing
3326                  * all the operations that would have been performed on the
3327                  * data from a call to tg3_nvram_read_be32().
3328                  */
3329                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3330
3331                 val = tr32(GRC_EEPROM_ADDR);
3332                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3333
3334                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3335                         EEPROM_ADDR_READ);
3336                 tw32(GRC_EEPROM_ADDR, val |
3337                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3338                         (addr & EEPROM_ADDR_ADDR_MASK) |
3339                         EEPROM_ADDR_START |
3340                         EEPROM_ADDR_WRITE);
3341
3342                 for (j = 0; j < 1000; j++) {
3343                         val = tr32(GRC_EEPROM_ADDR);
3344
3345                         if (val & EEPROM_ADDR_COMPLETE)
3346                                 break;
3347                         msleep(1);
3348                 }
3349                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3350                         rc = -EBUSY;
3351                         break;
3352                 }
3353         }
3354
3355         return rc;
3356 }
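
/* Endianness round trip (illustrative): the EEPROM read path returns
 * cpu_to_be32(swab32(hw)), so the write path above undoes it with
 * swab32(be32_to_cpu(data)):
 *
 *      swab32(be32_to_cpu(cpu_to_be32(swab32(hw)))) == hw
 *
 * i.e. a value written here reads back unchanged through
 * tg3_nvram_read_be32().
 */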
3357
3358 /* offset and length are dword aligned */
3359 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3360                 u8 *buf)
3361 {
3362         int ret = 0;
3363         u32 pagesize = tp->nvram_pagesize;
3364         u32 pagemask = pagesize - 1;
3365         u32 nvram_cmd;
3366         u8 *tmp;
3367
3368         tmp = kmalloc(pagesize, GFP_KERNEL);
3369         if (tmp == NULL)
3370                 return -ENOMEM;
3371
3372         while (len) {
3373                 int j;
3374                 u32 phy_addr, page_off, size;
3375
3376                 phy_addr = offset & ~pagemask;
3377
3378                 for (j = 0; j < pagesize; j += 4) {
3379                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3380                                                   (__be32 *) (tmp + j));
3381                         if (ret)
3382                                 break;
3383                 }
3384                 if (ret)
3385                         break;
3386
3387                 page_off = offset & pagemask;
3388                 size = pagesize;
3389                 if (len < size)
3390                         size = len;
3391
3392                 len -= size;
3393
3394                 memcpy(tmp + page_off, buf, size);
3395
3396                 offset = offset + (pagesize - page_off);
3397
3398                 tg3_enable_nvram_access(tp);
3399
3400                 /*
3401                  * Before we can erase the flash page, we need
3402                  * to issue a special "write enable" command.
3403                  */
3404                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3405
3406                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3407                         break;
3408
3409                 /* Erase the target page */
3410                 tw32(NVRAM_ADDR, phy_addr);
3411
3412                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3413                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3414
3415                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3416                         break;
3417
3418                 /* Issue another write enable to start the write. */
3419                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420
3421                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422                         break;
3423
3424                 for (j = 0; j < pagesize; j += 4) {
3425                         __be32 data;
3426
3427                         data = *((__be32 *) (tmp + j));
3428
3429                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3430
3431                         tw32(NVRAM_ADDR, phy_addr + j);
3432
3433                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3434                                 NVRAM_CMD_WR;
3435
3436                         if (j == 0)
3437                                 nvram_cmd |= NVRAM_CMD_FIRST;
3438                         else if (j == (pagesize - 4))
3439                                 nvram_cmd |= NVRAM_CMD_LAST;
3440
3441                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3442                         if (ret)
3443                                 break;
3444                 }
3445                 if (ret)
3446                         break;
3447         }
3448
3449         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450         tg3_nvram_exec_cmd(tp, nvram_cmd);
3451
3452         kfree(tmp);
3453
3454         return ret;
3455 }
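
/* Page arithmetic sketch (illustrative): with a 256-byte page, writing
 * 8 bytes at offset 0x104 works out as:
 *
 *      phy_addr = 0x104 & ~0xff = 0x100    (page to read back and erase)
 *      page_off = 0x104 &  0xff = 0x04     (where the new bytes land)
 *
 * The whole page is read into tmp[], the new bytes are merged at
 * page_off, the page is erased, and tmp[] is written back one dword at
 * a time with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */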
3456
3457 /* offset and length are dword aligned */
3458 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3459                 u8 *buf)
3460 {
3461         int i, ret = 0;
3462
3463         for (i = 0; i < len; i += 4, offset += 4) {
3464                 u32 page_off, phy_addr, nvram_cmd;
3465                 __be32 data;
3466
3467                 memcpy(&data, buf + i, 4);
3468                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3469
3470                 page_off = offset % tp->nvram_pagesize;
3471
3472                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3473
3474                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3475
3476                 if (page_off == 0 || i == 0)
3477                         nvram_cmd |= NVRAM_CMD_FIRST;
3478                 if (page_off == (tp->nvram_pagesize - 4))
3479                         nvram_cmd |= NVRAM_CMD_LAST;
3480
3481                 if (i == (len - 4))
3482                         nvram_cmd |= NVRAM_CMD_LAST;
3483
3484                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3485                     !tg3_flag(tp, FLASH) ||
3486                     !tg3_flag(tp, 57765_PLUS))
3487                         tw32(NVRAM_ADDR, phy_addr);
3488
3489                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3490                     !tg3_flag(tp, 5755_PLUS) &&
3491                     (tp->nvram_jedecnum == JEDEC_ST) &&
3492                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3493                         u32 cmd;
3494
3495                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3496                         ret = tg3_nvram_exec_cmd(tp, cmd);
3497                         if (ret)
3498                                 break;
3499                 }
3500                 if (!tg3_flag(tp, FLASH)) {
3501                         /* We always do complete word writes to eeprom. */
3502                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3503                 }
3504
3505                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3506                 if (ret)
3507                         break;
3508         }
3509         return ret;
3510 }
3511
3512 /* offset and length are dword aligned */
3513 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3514 {
3515         int ret;
3516
3517         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3518                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3519                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3520                 udelay(40);
3521         }
3522
3523         if (!tg3_flag(tp, NVRAM)) {
3524                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3525         } else {
3526                 u32 grc_mode;
3527
3528                 ret = tg3_nvram_lock(tp);
3529                 if (ret)
3530                         return ret;
3531
3532                 tg3_enable_nvram_access(tp);
3533                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3534                         tw32(NVRAM_WRITE1, 0x406);
3535
3536                 grc_mode = tr32(GRC_MODE);
3537                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3538
3539                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3540                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3541                                 buf);
3542                 } else {
3543                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3544                                 buf);
3545                 }
3546
3547                 grc_mode = tr32(GRC_MODE);
3548                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3549
3550                 tg3_disable_nvram_access(tp);
3551                 tg3_nvram_unlock(tp);
3552         }
3553
3554         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3555                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3556                 udelay(40);
3557         }
3558
3559         return ret;
3560 }
3561
3562 #define RX_CPU_SCRATCH_BASE     0x30000
3563 #define RX_CPU_SCRATCH_SIZE     0x04000
3564 #define TX_CPU_SCRATCH_BASE     0x34000
3565 #define TX_CPU_SCRATCH_SIZE     0x04000
3566
3567 /* tp->lock is held. */
3568 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3569 {
3570         int i;
3571         const int iters = 10000;
3572
3573         for (i = 0; i < iters; i++) {
3574                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3575                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3576                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3577                         break;
3578                 if (pci_channel_offline(tp->pdev))
3579                         return -EBUSY;
3580         }
3581
3582         return (i == iters) ? -EBUSY : 0;
3583 }
3584
3585 /* tp->lock is held. */
3586 static int tg3_rxcpu_pause(struct tg3 *tp)
3587 {
3588         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3589
3590         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3592         udelay(10);
3593
3594         return rc;
3595 }
3596
3597 /* tp->lock is held. */
3598 static int tg3_txcpu_pause(struct tg3 *tp)
3599 {
3600         return tg3_pause_cpu(tp, TX_CPU_BASE);
3601 }
3602
3603 /* tp->lock is held. */
3604 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3605 {
3606         tw32(cpu_base + CPU_STATE, 0xffffffff);
3607         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3608 }
3609
3610 /* tp->lock is held. */
3611 static void tg3_rxcpu_resume(struct tg3 *tp)
3612 {
3613         tg3_resume_cpu(tp, RX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619         int rc;
3620
3621         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3622
3623         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3624                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3625
3626                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3627                 return 0;
3628         }
3629         if (cpu_base == RX_CPU_BASE) {
3630                 rc = tg3_rxcpu_pause(tp);
3631         } else {
3632                 /*
3633                  * There is only an Rx CPU for the 5750 derivative in the
3634                  * BCM4785.
3635                  */
3636                 if (tg3_flag(tp, IS_SSB_CORE))
3637                         return 0;
3638
3639                 rc = tg3_txcpu_pause(tp);
3640         }
3641
3642         if (rc) {
3643                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3644                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3645                 return -ENODEV;
3646         }
3647
3648         /* Clear firmware's nvram arbitration. */
3649         if (tg3_flag(tp, NVRAM))
3650                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3651         return 0;
3652 }
3653
3654 static int tg3_fw_data_len(struct tg3 *tp,
3655                            const struct tg3_firmware_hdr *fw_hdr)
3656 {
3657         int fw_len;
3658
3659         /* Non-fragmented firmware has one firmware header followed by a
3660          * contiguous chunk of data to be written. The length field in that
3661          * header is not the length of the data to be written but the
3662          * complete length of the BSS. The data length is determined from
3663          * tp->fw->size minus the header.
3664          *
3665          * Fragmented firmware has a main header followed by multiple
3666          * fragments. Each fragment is identical to non-fragmented firmware,
3667          * with a firmware header followed by a contiguous chunk of data. In
3668          * the main header, the length field is unused and set to 0xffffffff.
3669          * In each fragment header the length is the entire size of that
3670          * fragment, i.e. fragment data plus header length. The data length
3671          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3672          */
3673         if (tp->fw_len == 0xffffffff)
3674                 fw_len = be32_to_cpu(fw_hdr->len);
3675         else
3676                 fw_len = tp->fw->size;
3677
3678         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3679 }
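
/* Worked example (illustrative, assuming the 12-byte header of version,
 * base_addr and len): for a non-fragmented image with tp->fw->size ==
 * 0x1000, the data length is (0x1000 - 12) / 4 words; for a fragment
 * whose header reports len == 0x200, it is (0x200 - 12) / 4 words.
 */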
3680
3681 /* tp->lock is held. */
3682 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3683                                  u32 cpu_scratch_base, int cpu_scratch_size,
3684                                  const struct tg3_firmware_hdr *fw_hdr)
3685 {
3686         int err, i;
3687         void (*write_op)(struct tg3 *, u32, u32);
3688         int total_len = tp->fw->size;
3689
3690         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3691                 netdev_err(tp->dev,
3692                            "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
3693                            __func__);
3694                 return -EINVAL;
3695         }
3696
3697         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3698                 write_op = tg3_write_mem;
3699         else
3700                 write_op = tg3_write_indirect_reg32;
3701
3702         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3703                 /* It is possible that bootcode is still loading at this point.
3704                  * Take the NVRAM lock before halting the CPU.
3705                  */
3706                 int lock_err = tg3_nvram_lock(tp);
3707                 err = tg3_halt_cpu(tp, cpu_base);
3708                 if (!lock_err)
3709                         tg3_nvram_unlock(tp);
3710                 if (err)
3711                         goto out;
3712
3713                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3714                         write_op(tp, cpu_scratch_base + i, 0);
3715                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3716                 tw32(cpu_base + CPU_MODE,
3717                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3718         } else {
3719                 /* Subtract additional main header for fragmented firmware and
3720                  * advance to the first fragment.
3721                  */
3722                 total_len -= TG3_FW_HDR_LEN;
3723                 fw_hdr++;
3724         }
3725
3726         do {
3727                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3728                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3729                         write_op(tp, cpu_scratch_base +
3730                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3731                                      (i * sizeof(u32)),
3732                                  be32_to_cpu(fw_data[i]));
3733
3734                 total_len -= be32_to_cpu(fw_hdr->len);
3735
3736                 /* Advance to next fragment */
3737                 fw_hdr = (struct tg3_firmware_hdr *)
3738                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3739         } while (total_len > 0);
3740
3741         err = 0;
3742
3743 out:
3744         return err;
3745 }
3746
3747 /* tp->lock is held. */
3748 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3749 {
3750         int i;
3751         const int iters = 5;
3752
3753         tw32(cpu_base + CPU_STATE, 0xffffffff);
3754         tw32_f(cpu_base + CPU_PC, pc);
3755
3756         for (i = 0; i < iters; i++) {
3757                 if (tr32(cpu_base + CPU_PC) == pc)
3758                         break;
3759                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3760                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3761                 tw32_f(cpu_base + CPU_PC, pc);
3762                 udelay(1000);
3763         }
3764
3765         return (i == iters) ? -EBUSY : 0;
3766 }
3767
3768 /* tp->lock is held. */
3769 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3770 {
3771         const struct tg3_firmware_hdr *fw_hdr;
3772         int err;
3773
3774         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3775
3776         /* Firmware blob starts with version numbers, followed by
3777          * start address and length. We are setting complete length.
3778          * length = end_address_of_bss - start_address_of_text.
3779          * Remainder is the blob to be loaded contiguously
3780          * from start address. */
3781
3782         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3783                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3784                                     fw_hdr);
3785         if (err)
3786                 return err;
3787
3788         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3789                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3790                                     fw_hdr);
3791         if (err)
3792                 return err;
3793
3794         /* Now start up only the RX CPU. */
3795         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3796                                        be32_to_cpu(fw_hdr->base_addr));
3797         if (err) {
3798                 netdev_err(tp->dev, "%s: failed to set RX CPU PC, is %08x "
3799                            "should be %08x\n", __func__,
3800                            tr32(RX_CPU_BASE + CPU_PC),
3801                            be32_to_cpu(fw_hdr->base_addr));
3802                 return -ENODEV;
3803         }
3804
3805         tg3_rxcpu_resume(tp);
3806
3807         return 0;
3808 }
3809
3810 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3811 {
3812         const int iters = 1000;
3813         int i;
3814         u32 val;
3815
3816         /* Wait for boot code to complete initialization and enter service
3817          * loop. It is then safe to download service patches.
3818          */
3819         for (i = 0; i < iters; i++) {
3820                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3821                         break;
3822
3823                 udelay(10);
3824         }
3825
3826         if (i == iters) {
3827                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3828                 return -EBUSY;
3829         }
3830
3831         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3832         if (val & 0xff) {
3833                 netdev_warn(tp->dev,
3834                             "Other patches exist. Not downloading EEE patch\n");
3835                 return -EEXIST;
3836         }
3837
3838         return 0;
3839 }
3840
3841 /* tp->lock is held. */
3842 static void tg3_load_57766_firmware(struct tg3 *tp)
3843 {
3844         struct tg3_firmware_hdr *fw_hdr;
3845
3846         if (!tg3_flag(tp, NO_NVRAM))
3847                 return;
3848
3849         if (tg3_validate_rxcpu_state(tp))
3850                 return;
3851
3852         if (!tp->fw)
3853                 return;
3854
3855         /* This firmware blob has a different format than older firmware
3856          * releases, as described below. The main difference is that the data
3857          * is fragmented and written to non-contiguous locations.
3858          *
3859          * The blob begins with a firmware header identical to other firmware,
3860          * consisting of version, base addr and length. The length here is
3861          * unused and set to 0xffffffff.
3862          *
3863          * This is followed by a series of firmware fragments, each
3864          * individually identical to older firmware, i.e. a firmware header
3865          * followed by the data for that fragment. The version field of each
3866          * fragment header is unused. A layout sketch follows this function.
3867          */
3868
3869         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3871                 return;
3872
3873         if (tg3_rxcpu_pause(tp))
3874                 return;
3875
3876         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3877         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3878
3879         tg3_rxcpu_resume(tp);
3880 }
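
/* Fragmented image layout (illustrative):
 *
 *      +---------------------+
 *      | main header         |  len field unused (0xffffffff)
 *      +---------------------+
 *      | fragment header     |  len = header + data bytes
 *      | fragment data       |
 *      +---------------------+
 *      | fragment header     |
 *      | fragment data       |
 *      +---------------------+
 *      | ...                 |
 *      +---------------------+
 */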
3881
3882 /* tp->lock is held. */
3883 static int tg3_load_tso_firmware(struct tg3 *tp)
3884 {
3885         const struct tg3_firmware_hdr *fw_hdr;
3886         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3887         int err;
3888
3889         if (!tg3_flag(tp, FW_TSO))
3890                 return 0;
3891
3892         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3893
3894         /* Firmware blob starts with version numbers, followed by
3895          * start address and length. We are setting complete length.
3896          * length = end_address_of_bss - start_address_of_text.
3897          * Remainder is the blob to be loaded contiguously
3898          * from start address. */
3899
3900         cpu_scratch_size = tp->fw_len;
3901
3902         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3903                 cpu_base = RX_CPU_BASE;
3904                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3905         } else {
3906                 cpu_base = TX_CPU_BASE;
3907                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3908                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3909         }
3910
3911         err = tg3_load_firmware_cpu(tp, cpu_base,
3912                                     cpu_scratch_base, cpu_scratch_size,
3913                                     fw_hdr);
3914         if (err)
3915                 return err;
3916
3917         /* Now start up the CPU. */
3918         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3919                                        be32_to_cpu(fw_hdr->base_addr));
3920         if (err) {
3921                 netdev_err(tp->dev,
3922                            "%s: failed to set CPU PC, is %08x should be %08x\n",
3923                            __func__, tr32(cpu_base + CPU_PC),
3924                            be32_to_cpu(fw_hdr->base_addr));
3925                 return -ENODEV;
3926         }
3927
3928         tg3_resume_cpu(tp, cpu_base);
3929         return 0;
3930 }
3931
3932 /* tp->lock is held. */
3933 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3934                                    int index)
3935 {
3936         u32 addr_high, addr_low;
3937
3938         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3939         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3940                     (mac_addr[4] <<  8) | mac_addr[5]);
3941
3942         if (index < 4) {
3943                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3944                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3945         } else {
3946                 index -= 4;
3947                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3948                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3949         }
3950 }
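
/* Worked example (illustrative): for MAC address 00:11:22:33:44:55,
 *
 *      addr_high = 0x00000011      (bytes 0-1)
 *      addr_low  = 0x22334455      (bytes 2-5)
 *
 * matching the hardware's big-endian register layout. Indexes 0-3 map
 * to MAC_ADDR_[0-3]_{HIGH,LOW}; 4 and up map to the extended address
 * registers.
 */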
3951
3952 /* tp->lock is held. */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3954 {
3955         u32 addr_high;
3956         int i;
3957
3958         for (i = 0; i < 4; i++) {
3959                 if (i == 1 && skip_mac_1)
3960                         continue;
3961                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3962         }
3963
3964         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3965             tg3_asic_rev(tp) == ASIC_REV_5704) {
3966                 for (i = 4; i < 16; i++)
3967                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3968         }
3969
3970         addr_high = (tp->dev->dev_addr[0] +
3971                      tp->dev->dev_addr[1] +
3972                      tp->dev->dev_addr[2] +
3973                      tp->dev->dev_addr[3] +
3974                      tp->dev->dev_addr[4] +
3975                      tp->dev->dev_addr[5]) &
3976                 TX_BACKOFF_SEED_MASK;
3977         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3978 }
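
/* Backoff seed example (illustrative): for 00:11:22:33:44:55 the byte
 * sum is 0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55 = 0xff, masked with
 * TX_BACKOFF_SEED_MASK, so each port gets a MAC-derived seed that
 * decorrelates the half-duplex backoff timers.
 */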
3979
3980 static void tg3_enable_register_access(struct tg3 *tp)
3981 {
3982         /*
3983          * Make sure register accesses (indirect or otherwise) will function
3984          * correctly.
3985          */
3986         pci_write_config_dword(tp->pdev,
3987                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3988 }
3989
3990 static int tg3_power_up(struct tg3 *tp)
3991 {
3992         int err;
3993
3994         tg3_enable_register_access(tp);
3995
3996         err = pci_set_power_state(tp->pdev, PCI_D0);
3997         if (!err) {
3998                 /* Switch out of Vaux if it is a NIC */
3999                 tg3_pwrsrc_switch_to_vmain(tp);
4000         } else {
4001                 netdev_err(tp->dev, "Transition to D0 failed\n");
4002         }
4003
4004         return err;
4005 }
4006
4007 static int tg3_setup_phy(struct tg3 *, bool);
4008
4009 static int tg3_power_down_prepare(struct tg3 *tp)
4010 {
4011         u32 misc_host_ctrl;
4012         bool device_should_wake, do_low_power;
4013
4014         tg3_enable_register_access(tp);
4015
4016         /* Restore the CLKREQ setting. */
4017         if (tg3_flag(tp, CLKREQ_BUG))
4018                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4019                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4020
4021         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4022         tw32(TG3PCI_MISC_HOST_CTRL,
4023              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4024
4025         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4026                              tg3_flag(tp, WOL_ENABLE);
4027
4028         if (tg3_flag(tp, USE_PHYLIB)) {
4029                 do_low_power = false;
4030                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4031                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4032                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4033                         struct phy_device *phydev;
4034                         u32 phyid;
4035
4036                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4037
4038                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4039
4040                         tp->link_config.speed = phydev->speed;
4041                         tp->link_config.duplex = phydev->duplex;
4042                         tp->link_config.autoneg = phydev->autoneg;
4043                         ethtool_convert_link_mode_to_legacy_u32(
4044                                 &tp->link_config.advertising,
4045                                 phydev->advertising);
4046
4047                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4048                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4049                                          advertising);
4050                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4051                                          advertising);
4052                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4053                                          advertising);
4054
4055                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4056                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4057                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4058                                                          advertising);
4059                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4060                                                          advertising);
4061                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4062                                                          advertising);
4063                                 } else {
4064                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4065                                                          advertising);
4066                                 }
4067                         }
4068
4069                         linkmode_copy(phydev->advertising, advertising);
4070                         phy_start_aneg(phydev);
4071
4072                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073                         if (phyid != PHY_ID_BCMAC131) {
4074                                 phyid &= PHY_BCM_OUI_MASK;
4075                                 if (phyid == PHY_BCM_OUI_1 ||
4076                                     phyid == PHY_BCM_OUI_2 ||
4077                                     phyid == PHY_BCM_OUI_3)
4078                                         do_low_power = true;
4079                         }
4080                 }
4081         } else {
4082                 do_low_power = true;
4083
4084                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086
4087                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088                         tg3_setup_phy(tp, false);
4089         }
4090
4091         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092                 u32 val;
4093
4094                 val = tr32(GRC_VCPU_EXT_CTRL);
4095                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4097                 int i;
4098                 u32 val;
4099
4100                 for (i = 0; i < 200; i++) {
4101                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103                                 break;
4104                         msleep(1);
4105                 }
4106         }
4107         if (tg3_flag(tp, WOL_CAP))
4108                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109                                                      WOL_DRV_STATE_SHUTDOWN |
4110                                                      WOL_DRV_WOL |
4111                                                      WOL_SET_MAGIC_PKT);
4112
4113         if (device_should_wake) {
4114                 u32 mac_mode;
4115
4116                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117                         if (do_low_power &&
4118                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119                                 tg3_phy_auxctl_write(tp,
4120                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4122                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124                                 udelay(40);
4125                         }
4126
4127                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4129                         else if (tp->phy_flags &
4130                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131                                 if (tp->link_config.active_speed == SPEED_1000)
4132                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4133                                 else
4134                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4135                         } else
4136                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4137
4138                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141                                              SPEED_100 : SPEED_10;
4142                                 if (tg3_5700_link_polarity(tp, speed))
4143                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4144                                 else
4145                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4146                         }
4147                 } else {
4148                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4149                 }
4150
4151                 if (!tg3_flag(tp, 5750_PLUS))
4152                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4153
4154                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158
4159                 if (tg3_flag(tp, ENABLE_APE))
4160                         mac_mode |= MAC_MODE_APE_TX_EN |
4161                                     MAC_MODE_APE_RX_EN |
4162                                     MAC_MODE_TDE_ENABLE;
4163
4164                 tw32_f(MAC_MODE, mac_mode);
4165                 udelay(100);
4166
4167                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168                 udelay(10);
4169         }
4170
4171         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174                 u32 base_val;
4175
4176                 base_val = tp->pci_clock_ctrl;
4177                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178                              CLOCK_CTRL_TXCLK_DISABLE);
4179
4180                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182         } else if (tg3_flag(tp, 5780_CLASS) ||
4183                    tg3_flag(tp, CPMU_PRESENT) ||
4184                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4185                 /* do nothing */
4186         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187                 u32 newbits1, newbits2;
4188
4189                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4191                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192                                     CLOCK_CTRL_TXCLK_DISABLE |
4193                                     CLOCK_CTRL_ALTCLK);
4194                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195                 } else if (tg3_flag(tp, 5705_PLUS)) {
4196                         newbits1 = CLOCK_CTRL_625_CORE;
4197                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198                 } else {
4199                         newbits1 = CLOCK_CTRL_ALTCLK;
4200                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201                 }
4202
4203                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204                             40);
4205
4206                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207                             40);
4208
4209                 if (!tg3_flag(tp, 5705_PLUS)) {
4210                         u32 newbits3;
4211
4212                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4214                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215                                             CLOCK_CTRL_TXCLK_DISABLE |
4216                                             CLOCK_CTRL_44MHZ_CORE);
4217                         } else {
4218                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4219                         }
4220
4221                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222                                     tp->pci_clock_ctrl | newbits3, 40);
4223                 }
4224         }
4225
4226         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4227                 tg3_power_down_phy(tp, do_low_power);
4228
4229         tg3_frob_aux_power(tp, true);
4230
4231         /* Workaround for unstable PLL clock */
4232         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235                 u32 val = tr32(0x7d00);
4236
4237                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238                 tw32(0x7d00, val);
4239                 if (!tg3_flag(tp, ENABLE_ASF)) {
4240                         int err;
4241
4242                         err = tg3_nvram_lock(tp);
4243                         tg3_halt_cpu(tp, RX_CPU_BASE);
4244                         if (!err)
4245                                 tg3_nvram_unlock(tp);
4246                 }
4247         }
4248
4249         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250
4251         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4252
4253         return 0;
4254 }
4255
4256 static void tg3_power_down(struct tg3 *tp)
4257 {
4258         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259         pci_set_power_state(tp->pdev, PCI_D3hot);
4260 }
4261
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4263 {
4264         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265         case MII_TG3_AUX_STAT_10HALF:
4266                 *speed = SPEED_10;
4267                 *duplex = DUPLEX_HALF;
4268                 break;
4269
4270         case MII_TG3_AUX_STAT_10FULL:
4271                 *speed = SPEED_10;
4272                 *duplex = DUPLEX_FULL;
4273                 break;
4274
4275         case MII_TG3_AUX_STAT_100HALF:
4276                 *speed = SPEED_100;
4277                 *duplex = DUPLEX_HALF;
4278                 break;
4279
4280         case MII_TG3_AUX_STAT_100FULL:
4281                 *speed = SPEED_100;
4282                 *duplex = DUPLEX_FULL;
4283                 break;
4284
4285         case MII_TG3_AUX_STAT_1000HALF:
4286                 *speed = SPEED_1000;
4287                 *duplex = DUPLEX_HALF;
4288                 break;
4289
4290         case MII_TG3_AUX_STAT_1000FULL:
4291                 *speed = SPEED_1000;
4292                 *duplex = DUPLEX_FULL;
4293                 break;
4294
4295         default:
4296                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298                                  SPEED_10;
4299                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300                                   DUPLEX_HALF;
4301                         break;
4302                 }
4303                 *speed = SPEED_UNKNOWN;
4304                 *duplex = DUPLEX_UNKNOWN;
4305                 break;
4306         }
4307 }
4308
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4310 {
4311         int err = 0;
4312         u32 val, new_adv;
4313
4314         new_adv = ADVERTISE_CSMA;
4315         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316         new_adv |= mii_advertise_flowctrl(flowctrl);
4317
4318         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4319         if (err)
4320                 goto done;
4321
4322         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4324
4325                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4328
4329                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4330                 if (err)
4331                         goto done;
4332         }
4333
4334         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4335                 goto done;
4336
4337         tw32(TG3_CPMU_EEE_MODE,
4338              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4339
4340         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4341         if (!err) {
4342                 u32 err2;
4343
4344                 val = 0;
4345                 /* Advertise 100-BaseTX EEE ability */
4346                 if (advertise & ADVERTISED_100baseT_Full)
4347                         val |= MDIO_AN_EEE_ADV_100TX;
4348                 /* Advertise 1000-BaseT EEE ability */
4349                 if (advertise & ADVERTISED_1000baseT_Full)
4350                         val |= MDIO_AN_EEE_ADV_1000T;
4351
4352                 if (!tp->eee.eee_enabled) {
4353                         val = 0;
4354                         tp->eee.advertised = 0;
4355                 } else {
4356                         tp->eee.advertised = advertise &
4357                                              (ADVERTISED_100baseT_Full |
4358                                               ADVERTISED_1000baseT_Full);
4359                 }
4360
4361                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4362                 if (err)
4363                         val = 0;
4364
4365                 switch (tg3_asic_rev(tp)) {
4366                 case ASIC_REV_5717:
4367                 case ASIC_REV_57765:
4368                 case ASIC_REV_57766:
4369                 case ASIC_REV_5719:
4370                         /* If we advertised any EEE modes above... */
4371                         if (val)
4372                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4373                                       MII_TG3_DSP_TAP26_RMRXSTO |
4374                                       MII_TG3_DSP_TAP26_OPCSINPT;
4375                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4376                         fallthrough;
4377                 case ASIC_REV_5720:
4378                 case ASIC_REV_5762:
4379                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4382                 }
4383
4384                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4385                 if (!err)
4386                         err = err2;
4387         }
4388
4389 done:
4390         return err;
4391 }
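
/* Note: tg3_phy_cl45_write() (defined earlier in this file) reaches the
 * clause-45 EEE advertisement register through the standard clause-22
 * indirect (MMD) access.  A minimal sketch of that sequence, written
 * with the generic linux/mii.h names rather than the driver's own
 * defines:
 *
 *	tg3_writephy(tp, MII_MMD_CTRL, devad);		// select MMD device
 *	tg3_writephy(tp, MII_MMD_DATA, reg);		// register address
 *	tg3_writephy(tp, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
 *	tg3_writephy(tp, MII_MMD_DATA, val);		// register data
 */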
4392
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4394 {
4395         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4397                 u32 adv, fc;
4398
4399                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401                         adv = ADVERTISED_10baseT_Half |
4402                               ADVERTISED_10baseT_Full;
4403                         if (tg3_flag(tp, WOL_SPEED_100MB))
4404                                 adv |= ADVERTISED_100baseT_Half |
4405                                        ADVERTISED_100baseT_Full;
4406                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4407                                 if (!(tp->phy_flags &
4408                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4409                                         adv |= ADVERTISED_1000baseT_Half;
4410                                 adv |= ADVERTISED_1000baseT_Full;
4411                         }
4412
4413                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4414                 } else {
4415                         adv = tp->link_config.advertising;
4416                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4417                                 adv &= ~(ADVERTISED_1000baseT_Half |
4418                                          ADVERTISED_1000baseT_Full);
4419
4420                         fc = tp->link_config.flowctrl;
4421                 }
4422
4423                 tg3_phy_autoneg_cfg(tp, adv, fc);
4424
4425                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4426                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4427                         /* Normally during power down we want to autonegotiate
4428                          * the lowest possible speed for WOL. However, to avoid
4429                          * link flap, we leave it untouched.
4430                          */
4431                         return;
4432                 }
4433
4434                 tg3_writephy(tp, MII_BMCR,
4435                              BMCR_ANENABLE | BMCR_ANRESTART);
4436         } else {
4437                 int i;
4438                 u32 bmcr, orig_bmcr;
4439
4440                 tp->link_config.active_speed = tp->link_config.speed;
4441                 tp->link_config.active_duplex = tp->link_config.duplex;
4442
4443                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4444                         /* With autoneg disabled, the 5714/5715 family only
4445                          * links up when the advertisement register has the
4446                          * configured speed enabled.
4447                          */
4448                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4449                 }
4450
4451                 bmcr = 0;
4452                 switch (tp->link_config.speed) {
4453                 default:
4454                 case SPEED_10:
4455                         break;
4456
4457                 case SPEED_100:
4458                         bmcr |= BMCR_SPEED100;
4459                         break;
4460
4461                 case SPEED_1000:
4462                         bmcr |= BMCR_SPEED1000;
4463                         break;
4464                 }
4465
4466                 if (tp->link_config.duplex == DUPLEX_FULL)
4467                         bmcr |= BMCR_FULLDPLX;
4468
4469                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4470                     (bmcr != orig_bmcr)) {
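                        /* Briefly force loopback so the PHY drops link;
                         * the loop below waits up to ~15 ms (1500 * 10 us)
                         * for BMSR_LSTATUS to clear before the new forced
                         * mode is written.
                         */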
4471                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4472                         for (i = 0; i < 1500; i++) {
4473                                 u32 tmp;
4474
4475                                 udelay(10);
4476                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4477                                     tg3_readphy(tp, MII_BMSR, &tmp))
4478                                         continue;
4479                                 if (!(tmp & BMSR_LSTATUS)) {
4480                                         udelay(40);
4481                                         break;
4482                                 }
4483                         }
4484                         tg3_writephy(tp, MII_BMCR, bmcr);
4485                         udelay(40);
4486                 }
4487         }
4488 }
4489
4490 static int tg3_phy_pull_config(struct tg3 *tp)
4491 {
4492         int err;
4493         u32 val;
4494
4495         err = tg3_readphy(tp, MII_BMCR, &val);
4496         if (err)
4497                 goto done;
4498
4499         if (!(val & BMCR_ANENABLE)) {
4500                 tp->link_config.autoneg = AUTONEG_DISABLE;
4501                 tp->link_config.advertising = 0;
4502                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4503
4504                 err = -EIO;
4505
4506                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4507                 case 0:
4508                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4509                                 goto done;
4510
4511                         tp->link_config.speed = SPEED_10;
4512                         break;
4513                 case BMCR_SPEED100:
4514                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4515                                 goto done;
4516
4517                         tp->link_config.speed = SPEED_100;
4518                         break;
4519                 case BMCR_SPEED1000:
4520                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4521                                 tp->link_config.speed = SPEED_1000;
4522                                 break;
4523                         }
4524                         fallthrough;
4525                 default:
4526                         goto done;
4527                 }
4528
4529                 if (val & BMCR_FULLDPLX)
4530                         tp->link_config.duplex = DUPLEX_FULL;
4531                 else
4532                         tp->link_config.duplex = DUPLEX_HALF;
4533
4534                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4535
4536                 err = 0;
4537                 goto done;
4538         }
4539
4540         tp->link_config.autoneg = AUTONEG_ENABLE;
4541         tp->link_config.advertising = ADVERTISED_Autoneg;
4542         tg3_flag_set(tp, PAUSE_AUTONEG);
4543
4544         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4545                 u32 adv;
4546
4547                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4548                 if (err)
4549                         goto done;
4550
4551                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4552                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4553
4554                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4555         } else {
4556                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4557         }
4558
4559         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4560                 u32 adv;
4561
4562                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4564                         if (err)
4565                                 goto done;
4566
4567                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4568                 } else {
4569                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4570                         if (err)
4571                                 goto done;
4572
4573                         adv = tg3_decode_flowctrl_1000X(val);
4574                         tp->link_config.flowctrl = adv;
4575
4576                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4577                         adv = mii_adv_to_ethtool_adv_x(val);
4578                 }
4579
4580                 tp->link_config.advertising |= adv;
4581         }
4582
4583 done:
4584         return err;
4585 }
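
/* For reference, the BMCR speed decoding used above (IEEE 802.3
 * clause 22):
 *
 *	SPEED1000  SPEED100   resulting speed
 *	    0          0      10 Mb/s
 *	    0          1      100 Mb/s
 *	    1          0      1000 Mb/s
 *	    1          1      reserved (rejected via the default case)
 */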
4586
4587 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4588 {
4589         int err;
4590
4591         /* Turn off tap power management and set the
4592          * extended packet length bit. */
4593         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4594
4595         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4596         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4597         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4598         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4599         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4600
4601         udelay(40);
4602
4603         return err;
4604 }
4605
4606 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4607 {
4608         struct ethtool_eee eee;
4609
4610         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4611                 return true;
4612
4613         tg3_eee_pull_config(tp, &eee);
4614
4615         if (tp->eee.eee_enabled) {
4616                 if (tp->eee.advertised != eee.advertised ||
4617                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4618                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4619                         return false;
4620         } else {
4621                 /* EEE is disabled but we're advertising */
4622                 if (eee.advertised)
4623                         return false;
4624         }
4625
4626         return true;
4627 }
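
/* A false return from tg3_phy_eee_config_ok() marks the hardware EEE
 * state as stale; tg3_setup_copper_phy() below then reapplies it with
 * tg3_setup_eee() plus a PHY reset in the case where Link Flap
 * Avoidance had skipped the reset.
 */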
4628
4629 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4630 {
4631         u32 advmsk, tgtadv, advertising;
4632
4633         advertising = tp->link_config.advertising;
4634         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4635
4636         advmsk = ADVERTISE_ALL;
4637         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4638                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4639                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4640         }
4641
4642         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4643                 return false;
4644
4645         if ((*lcladv & advmsk) != tgtadv)
4646                 return false;
4647
4648         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4649                 u32 tg3_ctrl;
4650
4651                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4652
4653                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4654                         return false;
4655
4656                 if (tgtadv &&
4657                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4658                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4659                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4660                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4661                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4662                 } else {
4663                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4664                 }
4665
4666                 if (tg3_ctrl != tgtadv)
4667                         return false;
4668         }
4669
4670         return true;
4671 }
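
/* Note: the CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER bits mirror the
 * 5701 A0/B0 handling in tg3_phy_autoneg_cfg(); without folding them
 * into tgtadv, the readback comparison would always report a stale
 * advertisement on those revisions.
 */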
4672
4673 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4674 {
4675         u32 lpeth = 0;
4676
4677         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4678                 u32 val;
4679
4680                 if (tg3_readphy(tp, MII_STAT1000, &val))
4681                         return false;
4682
4683                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4684         }
4685
4686         if (tg3_readphy(tp, MII_LPA, rmtadv))
4687                 return false;
4688
4689         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4690         tp->link_config.rmt_adv = lpeth;
4691
4692         return true;
4693 }
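
/* The mii_*_to_ethtool_lpa_t() conversions come from linux/mii.h.
 * Example: a partner advertising 1000BASE-T full duplex and symmetric
 * pause yields rmt_adv == (ADVERTISED_1000baseT_Full | ADVERTISED_Pause
 * | ADVERTISED_Autoneg), the last bit derived from LPA_LPACK.
 */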
4694
4695 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4696 {
4697         if (curr_link_up != tp->link_up) {
4698                 if (curr_link_up) {
4699                         netif_carrier_on(tp->dev);
4700                 } else {
4701                         netif_carrier_off(tp->dev);
4702                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4703                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4704                 }
4705
4706                 tg3_link_report(tp);
4707                 return true;
4708         }
4709
4710         return false;
4711 }
4712
4713 static void tg3_clear_mac_status(struct tg3 *tp)
4714 {
4715         tw32(MAC_EVENT, 0);
4716
4717         tw32_f(MAC_STATUS,
4718                MAC_STATUS_SYNC_CHANGED |
4719                MAC_STATUS_CFG_CHANGED |
4720                MAC_STATUS_MI_COMPLETION |
4721                MAC_STATUS_LNKSTATE_CHANGED);
4722         udelay(40);
4723 }
4724
4725 static void tg3_setup_eee(struct tg3 *tp)
4726 {
4727         u32 val;
4728
4729         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4730               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4731         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4732                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4733
4734         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4735
4736         tw32_f(TG3_CPMU_EEE_CTRL,
4737                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4738
4739         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4740               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4741               TG3_CPMU_EEEMD_LPI_IN_RX |
4742               TG3_CPMU_EEEMD_EEE_ENABLE;
4743
4744         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4745                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4746
4747         if (tg3_flag(tp, ENABLE_APE))
4748                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4749
4750         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4751
4752         tw32_f(TG3_CPMU_EEE_DBTMR1,
4753                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4754                (tp->eee.tx_lpi_timer & 0xffff));
4755
4756         tw32_f(TG3_CPMU_EEE_DBTMR2,
4757                TG3_CPMU_DBTMR2_APE_TX_2047US |
4758                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4759 }
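
/* TG3_CPMU_EEE_DBTMR1 packs two 16-bit microsecond timers: the PCIe
 * exit debounce in the upper half (the 2047 us constant above) and the
 * LPI entry timer from tp->eee.tx_lpi_timer in the lower half, hence
 * the & 0xffff mask.
 */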
4760
4761 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4762 {
4763         bool current_link_up;
4764         u32 bmsr, val;
4765         u32 lcl_adv, rmt_adv;
4766         u32 current_speed;
4767         u8 current_duplex;
4768         int i, err;
4769
4770         tg3_clear_mac_status(tp);
4771
4772         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4773                 tw32_f(MAC_MI_MODE,
4774                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4775                 udelay(80);
4776         }
4777
4778         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4779
4780         /* Some third-party PHYs need to be reset on link going
4781          * down.
4782          */
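        /* BMSR's link-status bit latches low, so it is read twice here
         * and throughout this function: the first read flushes the
         * latched value, the second returns the current state.
         */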
4783         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4784              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4785              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4786             tp->link_up) {
4787                 tg3_readphy(tp, MII_BMSR, &bmsr);
4788                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4789                     !(bmsr & BMSR_LSTATUS))
4790                         force_reset = true;
4791         }
4792         if (force_reset)
4793                 tg3_phy_reset(tp);
4794
4795         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4796                 tg3_readphy(tp, MII_BMSR, &bmsr);
4797                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4798                     !tg3_flag(tp, INIT_COMPLETE))
4799                         bmsr = 0;
4800
4801                 if (!(bmsr & BMSR_LSTATUS)) {
4802                         err = tg3_init_5401phy_dsp(tp);
4803                         if (err)
4804                                 return err;
4805
4806                         tg3_readphy(tp, MII_BMSR, &bmsr);
4807                         for (i = 0; i < 1000; i++) {
4808                                 udelay(10);
4809                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4810                                     (bmsr & BMSR_LSTATUS)) {
4811                                         udelay(40);
4812                                         break;
4813                                 }
4814                         }
4815
4816                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4817                             TG3_PHY_REV_BCM5401_B0 &&
4818                             !(bmsr & BMSR_LSTATUS) &&
4819                             tp->link_config.active_speed == SPEED_1000) {
4820                                 err = tg3_phy_reset(tp);
4821                                 if (!err)
4822                                         err = tg3_init_5401phy_dsp(tp);
4823                                 if (err)
4824                                         return err;
4825                         }
4826                 }
4827         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4828                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4829                 /* 5701 {A0,B0} CRC bug workaround */
4830                 tg3_writephy(tp, 0x15, 0x0a75);
4831                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4833                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834         }
4835
4836         /* Clear pending interrupts... */
4837         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4839
4840         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4841                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4842         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4843                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4844
4845         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4846             tg3_asic_rev(tp) == ASIC_REV_5701) {
4847                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4848                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4849                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4850                 else
4851                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4852         }
4853
4854         current_link_up = false;
4855         current_speed = SPEED_UNKNOWN;
4856         current_duplex = DUPLEX_UNKNOWN;
4857         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4858         tp->link_config.rmt_adv = 0;
4859
4860         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4861                 err = tg3_phy_auxctl_read(tp,
4862                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4863                                           &val);
4864                 if (!err && !(val & (1 << 10))) {
4865                         tg3_phy_auxctl_write(tp,
4866                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4867                                              val | (1 << 10));
4868                         goto relink;
4869                 }
4870         }
4871
4872         bmsr = 0;
4873         for (i = 0; i < 100; i++) {
4874                 tg3_readphy(tp, MII_BMSR, &bmsr);
4875                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4876                     (bmsr & BMSR_LSTATUS))
4877                         break;
4878                 udelay(40);
4879         }
4880
4881         if (bmsr & BMSR_LSTATUS) {
4882                 u32 aux_stat, bmcr;
4883
4884                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4885                 for (i = 0; i < 2000; i++) {
4886                         udelay(10);
4887                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4888                             aux_stat)
4889                                 break;
4890                 }
4891
4892                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4893                                              &current_speed,
4894                                              &current_duplex);
4895
4896                 bmcr = 0;
4897                 for (i = 0; i < 200; i++) {
4898                         tg3_readphy(tp, MII_BMCR, &bmcr);
4899                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4900                                 continue;
4901                         if (bmcr && bmcr != 0x7fff)
4902                                 break;
4903                         udelay(10);
4904                 }
4905
4906                 lcl_adv = 0;
4907                 rmt_adv = 0;
4908
4909                 tp->link_config.active_speed = current_speed;
4910                 tp->link_config.active_duplex = current_duplex;
4911
4912                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4913                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4914
4915                         if ((bmcr & BMCR_ANENABLE) &&
4916                             eee_config_ok &&
4917                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4918                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4919                                 current_link_up = true;
4920
4921                         /* Changes to EEE settings take effect only after a PHY
4922                          * reset.  If we have skipped a reset due to Link Flap
4923                          * Avoidance being enabled, do it now.
4924                          */
4925                         if (!eee_config_ok &&
4926                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4927                             !force_reset) {
4928                                 tg3_setup_eee(tp);
4929                                 tg3_phy_reset(tp);
4930                         }
4931                 } else {
4932                         if (!(bmcr & BMCR_ANENABLE) &&
4933                             tp->link_config.speed == current_speed &&
4934                             tp->link_config.duplex == current_duplex) {
4935                                 current_link_up = true;
4936                         }
4937                 }
4938
4939                 if (current_link_up &&
4940                     tp->link_config.active_duplex == DUPLEX_FULL) {
4941                         u32 reg, bit;
4942
4943                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4944                                 reg = MII_TG3_FET_GEN_STAT;
4945                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4946                         } else {
4947                                 reg = MII_TG3_EXT_STAT;
4948                                 bit = MII_TG3_EXT_STAT_MDIX;
4949                         }
4950
4951                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4952                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4953
4954                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
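                        /* e.g. with both sides advertising symmetric
                         * pause (ADVERTISE_PAUSE_CAP in lcl_adv and
                         * rmt_adv), tg3_setup_flow_control() enables
                         * pause in both directions per 802.3 annex 28B.
                         */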
4955                 }
4956         }
4957
4958 relink:
4959         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4960                 tg3_phy_copper_begin(tp);
4961
4962                 if (tg3_flag(tp, ROBOSWITCH)) {
4963                         current_link_up = true;
4964                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4965                         current_speed = SPEED_1000;
4966                         current_duplex = DUPLEX_FULL;
4967                         tp->link_config.active_speed = current_speed;
4968                         tp->link_config.active_duplex = current_duplex;
4969                 }
4970
4971                 tg3_readphy(tp, MII_BMSR, &bmsr);
4972                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4973                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4974                         current_link_up = true;
4975         }
4976
4977         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4978         if (current_link_up) {
4979                 if (tp->link_config.active_speed == SPEED_100 ||
4980                     tp->link_config.active_speed == SPEED_10)
4981                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4982                 else
4983                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4985                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4986         else
4987                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4988
4989         /* In order for the 5750 core in BCM4785 chip to work properly
4990          * in RGMII mode, the Led Control Register must be set up.
4991          */
4992         if (tg3_flag(tp, RGMII_MODE)) {
4993                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4994                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4995
4996                 if (tp->link_config.active_speed == SPEED_10)
4997                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4998                 else if (tp->link_config.active_speed == SPEED_100)
4999                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000                                      LED_CTRL_100MBPS_ON);
5001                 else if (tp->link_config.active_speed == SPEED_1000)
5002                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003                                      LED_CTRL_1000MBPS_ON);
5004
5005                 tw32(MAC_LED_CTRL, led_ctrl);
5006                 udelay(40);
5007         }
5008
5009         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010         if (tp->link_config.active_duplex == DUPLEX_HALF)
5011                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5012
5013         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5014                 if (current_link_up &&
5015                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5016                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5017                 else
5018                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5019         }
5020
5021         /* Without this setting the Netgear GA302T PHY does not
5022          * send/receive packets; the reason is not understood.
5023          */
5024         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5025             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5026                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5027                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5028                 udelay(80);
5029         }
5030
5031         tw32_f(MAC_MODE, tp->mac_mode);
5032         udelay(40);
5033
5034         tg3_phy_eee_adjust(tp, current_link_up);
5035
5036         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5037                 /* Polled via timer. */
5038                 tw32_f(MAC_EVENT, 0);
5039         } else {
5040                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5041         }
5042         udelay(40);
5043
5044         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5045             current_link_up &&
5046             tp->link_config.active_speed == SPEED_1000 &&
5047             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5048                 udelay(120);
5049                 tw32_f(MAC_STATUS,
5050                      (MAC_STATUS_SYNC_CHANGED |
5051                       MAC_STATUS_CFG_CHANGED));
5052                 udelay(40);
5053                 tg3_write_mem(tp,
5054                               NIC_SRAM_FIRMWARE_MBOX,
5055                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5056         }
5057
5058         /* Prevent send BD corruption. */
5059         if (tg3_flag(tp, CLKREQ_BUG)) {
5060                 if (tp->link_config.active_speed == SPEED_100 ||
5061                     tp->link_config.active_speed == SPEED_10)
5062                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5063                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5064                 else
5065                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5066                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5067         }
5068
5069         tg3_test_and_report_link_chg(tp, current_link_up);
5070
5071         return 0;
5072 }
5073
5074 struct tg3_fiber_aneginfo {
5075         int state;
5076 #define ANEG_STATE_UNKNOWN              0
5077 #define ANEG_STATE_AN_ENABLE            1
5078 #define ANEG_STATE_RESTART_INIT         2
5079 #define ANEG_STATE_RESTART              3
5080 #define ANEG_STATE_DISABLE_LINK_OK      4
5081 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5082 #define ANEG_STATE_ABILITY_DETECT       6
5083 #define ANEG_STATE_ACK_DETECT_INIT      7
5084 #define ANEG_STATE_ACK_DETECT           8
5085 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5086 #define ANEG_STATE_COMPLETE_ACK         10
5087 #define ANEG_STATE_IDLE_DETECT_INIT     11
5088 #define ANEG_STATE_IDLE_DETECT          12
5089 #define ANEG_STATE_LINK_OK              13
5090 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5091 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5092
5093         u32 flags;
5094 #define MR_AN_ENABLE            0x00000001
5095 #define MR_RESTART_AN           0x00000002
5096 #define MR_AN_COMPLETE          0x00000004
5097 #define MR_PAGE_RX              0x00000008
5098 #define MR_NP_LOADED            0x00000010
5099 #define MR_TOGGLE_TX            0x00000020
5100 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5101 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5102 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5103 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5104 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5105 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5106 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5107 #define MR_TOGGLE_RX            0x00002000
5108 #define MR_NP_RX                0x00004000
5109
5110 #define MR_LINK_OK              0x80000000
5111
5112         unsigned long link_time, cur_time;
5113
5114         u32 ability_match_cfg;
5115         int ability_match_count;
5116
5117         char ability_match, idle_match, ack_match;
5118
5119         u32 txconfig, rxconfig;
5120 #define ANEG_CFG_NP             0x00000080
5121 #define ANEG_CFG_ACK            0x00000040
5122 #define ANEG_CFG_RF2            0x00000020
5123 #define ANEG_CFG_RF1            0x00000010
5124 #define ANEG_CFG_PS2            0x00000001
5125 #define ANEG_CFG_PS1            0x00008000
5126 #define ANEG_CFG_HD             0x00004000
5127 #define ANEG_CFG_FD             0x00002000
5128 #define ANEG_CFG_INVAL          0x00001f06
5129
5130 };
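
/* Note: the ANEG_CFG_* values above are the IEEE 802.3 clause 37
 * config-word bits with their two bytes swapped (e.g. full duplex is
 * bit 5, 0x0020, in the standard word but 0x2000 here), matching the
 * layout of the MAC_TX_AUTO_NEG/MAC_RX_AUTO_NEG registers.
 */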
5131 #define ANEG_OK         0
5132 #define ANEG_DONE       1
5133 #define ANEG_TIMER_ENAB 2
5134 #define ANEG_FAILED     -1
5135
5136 #define ANEG_STATE_SETTLE_TIME  10000
5137
5138 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5139                                    struct tg3_fiber_aneginfo *ap)
5140 {
5141         u16 flowctrl;
5142         unsigned long delta;
5143         u32 rx_cfg_reg;
5144         int ret;
5145
5146         if (ap->state == ANEG_STATE_UNKNOWN) {
5147                 ap->rxconfig = 0;
5148                 ap->link_time = 0;
5149                 ap->cur_time = 0;
5150                 ap->ability_match_cfg = 0;
5151                 ap->ability_match_count = 0;
5152                 ap->ability_match = 0;
5153                 ap->idle_match = 0;
5154                 ap->ack_match = 0;
5155         }
5156         ap->cur_time++;
5157
5158         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5159                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5160
5161                 if (rx_cfg_reg != ap->ability_match_cfg) {
5162                         ap->ability_match_cfg = rx_cfg_reg;
5163                         ap->ability_match = 0;
5164                         ap->ability_match_count = 0;
5165                 } else {
5166                         if (++ap->ability_match_count > 1) {
5167                                 ap->ability_match = 1;
5168                                 ap->ability_match_cfg = rx_cfg_reg;
5169                         }
5170                 }
5171                 if (rx_cfg_reg & ANEG_CFG_ACK)
5172                         ap->ack_match = 1;
5173                 else
5174                         ap->ack_match = 0;
5175
5176                 ap->idle_match = 0;
5177         } else {
5178                 ap->idle_match = 1;
5179                 ap->ability_match_cfg = 0;
5180                 ap->ability_match_count = 0;
5181                 ap->ability_match = 0;
5182                 ap->ack_match = 0;
5183
5184                 rx_cfg_reg = 0;
5185         }
5186
5187         ap->rxconfig = rx_cfg_reg;
5188         ret = ANEG_OK;
5189
5190         switch (ap->state) {
5191         case ANEG_STATE_UNKNOWN:
5192                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5193                         ap->state = ANEG_STATE_AN_ENABLE;
5194
5195                 fallthrough;
5196         case ANEG_STATE_AN_ENABLE:
5197                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5198                 if (ap->flags & MR_AN_ENABLE) {
5199                         ap->link_time = 0;
5200                         ap->cur_time = 0;
5201                         ap->ability_match_cfg = 0;
5202                         ap->ability_match_count = 0;
5203                         ap->ability_match = 0;
5204                         ap->idle_match = 0;
5205                         ap->ack_match = 0;
5206
5207                         ap->state = ANEG_STATE_RESTART_INIT;
5208                 } else {
5209                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5210                 }
5211                 break;
5212
5213         case ANEG_STATE_RESTART_INIT:
5214                 ap->link_time = ap->cur_time;
5215                 ap->flags &= ~(MR_NP_LOADED);
5216                 ap->txconfig = 0;
5217                 tw32(MAC_TX_AUTO_NEG, 0);
5218                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5219                 tw32_f(MAC_MODE, tp->mac_mode);
5220                 udelay(40);
5221
5222                 ret = ANEG_TIMER_ENAB;
5223                 ap->state = ANEG_STATE_RESTART;
5224
5225                 fallthrough;
5226         case ANEG_STATE_RESTART:
5227                 delta = ap->cur_time - ap->link_time;
5228                 if (delta > ANEG_STATE_SETTLE_TIME)
5229                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5230                 else
5231                         ret = ANEG_TIMER_ENAB;
5232                 break;
5233
5234         case ANEG_STATE_DISABLE_LINK_OK:
5235                 ret = ANEG_DONE;
5236                 break;
5237
5238         case ANEG_STATE_ABILITY_DETECT_INIT:
5239                 ap->flags &= ~(MR_TOGGLE_TX);
5240                 ap->txconfig = ANEG_CFG_FD;
5241                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5242                 if (flowctrl & ADVERTISE_1000XPAUSE)
5243                         ap->txconfig |= ANEG_CFG_PS1;
5244                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5245                         ap->txconfig |= ANEG_CFG_PS2;
5246                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5247                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5248                 tw32_f(MAC_MODE, tp->mac_mode);
5249                 udelay(40);
5250
5251                 ap->state = ANEG_STATE_ABILITY_DETECT;
5252                 break;
5253
5254         case ANEG_STATE_ABILITY_DETECT:
5255                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5256                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5257                 break;
5258
5259         case ANEG_STATE_ACK_DETECT_INIT:
5260                 ap->txconfig |= ANEG_CFG_ACK;
5261                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5262                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5263                 tw32_f(MAC_MODE, tp->mac_mode);
5264                 udelay(40);
5265
5266                 ap->state = ANEG_STATE_ACK_DETECT;
5267
5268                 fallthrough;
5269         case ANEG_STATE_ACK_DETECT:
5270                 if (ap->ack_match != 0) {
5271                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5272                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5273                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5274                         } else {
5275                                 ap->state = ANEG_STATE_AN_ENABLE;
5276                         }
5277                 } else if (ap->ability_match != 0 &&
5278                            ap->rxconfig == 0) {
5279                         ap->state = ANEG_STATE_AN_ENABLE;
5280                 }
5281                 break;
5282
5283         case ANEG_STATE_COMPLETE_ACK_INIT:
5284                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5285                         ret = ANEG_FAILED;
5286                         break;
5287                 }
5288                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5289                                MR_LP_ADV_HALF_DUPLEX |
5290                                MR_LP_ADV_SYM_PAUSE |
5291                                MR_LP_ADV_ASYM_PAUSE |
5292                                MR_LP_ADV_REMOTE_FAULT1 |
5293                                MR_LP_ADV_REMOTE_FAULT2 |
5294                                MR_LP_ADV_NEXT_PAGE |
5295                                MR_TOGGLE_RX |
5296                                MR_NP_RX);
5297                 if (ap->rxconfig & ANEG_CFG_FD)
5298                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5299                 if (ap->rxconfig & ANEG_CFG_HD)
5300                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5301                 if (ap->rxconfig & ANEG_CFG_PS1)
5302                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5303                 if (ap->rxconfig & ANEG_CFG_PS2)
5304                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5305                 if (ap->rxconfig & ANEG_CFG_RF1)
5306                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5307                 if (ap->rxconfig & ANEG_CFG_RF2)
5308                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5309                 if (ap->rxconfig & ANEG_CFG_NP)
5310                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5311
5312                 ap->link_time = ap->cur_time;
5313
5314                 ap->flags ^= (MR_TOGGLE_TX);
5315                 if (ap->rxconfig & 0x0008)
5316                         ap->flags |= MR_TOGGLE_RX;
5317                 if (ap->rxconfig & ANEG_CFG_NP)
5318                         ap->flags |= MR_NP_RX;
5319                 ap->flags |= MR_PAGE_RX;
5320
5321                 ap->state = ANEG_STATE_COMPLETE_ACK;
5322                 ret = ANEG_TIMER_ENAB;
5323                 break;
5324
5325         case ANEG_STATE_COMPLETE_ACK:
5326                 if (ap->ability_match != 0 &&
5327                     ap->rxconfig == 0) {
5328                         ap->state = ANEG_STATE_AN_ENABLE;
5329                         break;
5330                 }
5331                 delta = ap->cur_time - ap->link_time;
5332                 if (delta > ANEG_STATE_SETTLE_TIME) {
5333                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5334                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5335                         } else {
5336                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5337                                     !(ap->flags & MR_NP_RX)) {
5338                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5339                                 } else {
5340                                         ret = ANEG_FAILED;
5341                                 }
5342                         }
5343                 }
5344                 break;
5345
5346         case ANEG_STATE_IDLE_DETECT_INIT:
5347                 ap->link_time = ap->cur_time;
5348                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5349                 tw32_f(MAC_MODE, tp->mac_mode);
5350                 udelay(40);
5351
5352                 ap->state = ANEG_STATE_IDLE_DETECT;
5353                 ret = ANEG_TIMER_ENAB;
5354                 break;
5355
5356         case ANEG_STATE_IDLE_DETECT:
5357                 if (ap->ability_match != 0 &&
5358                     ap->rxconfig == 0) {
5359                         ap->state = ANEG_STATE_AN_ENABLE;
5360                         break;
5361                 }
5362                 delta = ap->cur_time - ap->link_time;
5363                 if (delta > ANEG_STATE_SETTLE_TIME) {
5364                         /* XXX another gem from the Broadcom driver :( */
5365                         ap->state = ANEG_STATE_LINK_OK;
5366                 }
5367                 break;
5368
5369         case ANEG_STATE_LINK_OK:
5370                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5371                 ret = ANEG_DONE;
5372                 break;
5373
5374         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5375                 /* ??? unimplemented */
5376                 break;
5377
5378         case ANEG_STATE_NEXT_PAGE_WAIT:
5379                 /* ??? unimplemented */
5380                 break;
5381
5382         default:
5383                 ret = ANEG_FAILED;
5384                 break;
5385         }
5386
5387         return ret;
5388 }
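
/* The state machine above is clocked by fiber_autoneg() below at
 * roughly one tick per microsecond (udelay(1) per loop pass), so
 * ANEG_STATE_SETTLE_TIME corresponds to about 10 ms and the
 * 195000-tick budget in fiber_autoneg() to about 195 ms.
 */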
5389
5390 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5391 {
5392         int res = 0;
5393         struct tg3_fiber_aneginfo aninfo;
5394         int status = ANEG_FAILED;
5395         unsigned int tick;
5396         u32 tmp;
5397
5398         tw32_f(MAC_TX_AUTO_NEG, 0);
5399
5400         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5401         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5402         udelay(40);
5403
5404         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5405         udelay(40);
5406
5407         memset(&aninfo, 0, sizeof(aninfo));
5408         aninfo.flags |= MR_AN_ENABLE;
5409         aninfo.state = ANEG_STATE_UNKNOWN;
5410         aninfo.cur_time = 0;
5411         tick = 0;
5412         while (++tick < 195000) {
5413                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5414                 if (status == ANEG_DONE || status == ANEG_FAILED)
5415                         break;
5416
5417                 udelay(1);
5418         }
5419
5420         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5421         tw32_f(MAC_MODE, tp->mac_mode);
5422         udelay(40);
5423
5424         *txflags = aninfo.txconfig;
5425         *rxflags = aninfo.flags;
5426
5427         if (status == ANEG_DONE &&
5428             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5429                              MR_LP_ADV_FULL_DUPLEX)))
5430                 res = 1;
5431
5432         return res;
5433 }
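
/* fiber_autoneg() returns nonzero when the state machine completed and
 * any of MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set;
 * tg3_setup_fiber_by_hand() below treats that as a successfully
 * negotiated link.
 */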
5434
5435 static void tg3_init_bcm8002(struct tg3 *tp)
5436 {
5437         u32 mac_status = tr32(MAC_STATUS);
5438         int i;
5439
5440         /* Reset when initializing for the first time or when we have a link. */
5441         if (tg3_flag(tp, INIT_COMPLETE) &&
5442             !(mac_status & MAC_STATUS_PCS_SYNCED))
5443                 return;
5444
5445         /* Set PLL lock range. */
5446         tg3_writephy(tp, 0x16, 0x8007);
5447
5448         /* SW reset */
5449         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5450
5451         /* Wait for reset to complete. */
5452         /* XXX schedule_timeout() ... */
5453         for (i = 0; i < 500; i++)
5454                 udelay(10);
5455
5456         /* Config mode; select PMA/Ch 1 regs. */
5457         tg3_writephy(tp, 0x10, 0x8411);
5458
5459         /* Enable auto-lock and comdet, select txclk for tx. */
5460         tg3_writephy(tp, 0x11, 0x0a10);
5461
5462         tg3_writephy(tp, 0x18, 0x00a0);
5463         tg3_writephy(tp, 0x16, 0x41ff);
5464
5465         /* Assert and deassert POR. */
5466         tg3_writephy(tp, 0x13, 0x0400);
5467         udelay(40);
5468         tg3_writephy(tp, 0x13, 0x0000);
5469
5470         tg3_writephy(tp, 0x11, 0x0a50);
5471         udelay(40);
5472         tg3_writephy(tp, 0x11, 0x0a10);
5473
5474         /* Wait for signal to stabilize */
5475         /* XXX schedule_timeout() ... */
5476         for (i = 0; i < 15000; i++)
5477                 udelay(10);
5478
5479         /* Deselect the channel register so we can read the PHYID
5480          * later.
5481          */
5482         tg3_writephy(tp, 0x10, 0x8011);
5483 }
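
/* The register numbers used above (0x10, 0x11, 0x13, 0x16, 0x18) fall
 * in the IEEE-reserved vendor range (registers 16-31), so the values
 * are BCM8002-specific magic rather than standard MII fields.
 */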
5484
5485 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5486 {
5487         u16 flowctrl;
5488         bool current_link_up;
5489         u32 sg_dig_ctrl, sg_dig_status;
5490         u32 serdes_cfg, expected_sg_dig_ctrl;
5491         int workaround, port_a;
5492
5493         serdes_cfg = 0;
5494         workaround = 0;
5495         port_a = 1;
5496         current_link_up = false;
5497
5498         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5499             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5500                 workaround = 1;
5501                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5502                         port_a = 0;
5503
5504                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5505                 /* preserve bits 20-23 for voltage regulator */
5506                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5507         }
5508
5509         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5510
5511         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5512                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5513                         if (workaround) {
5514                                 u32 val = serdes_cfg;
5515
5516                                 if (port_a)
5517                                         val |= 0xc010000;
5518                                 else
5519                                         val |= 0x4010000;
5520                                 tw32_f(MAC_SERDES_CFG, val);
5521                         }
5522
5523                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5524                 }
5525                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5526                         tg3_setup_flow_control(tp, 0, 0);
5527                         current_link_up = true;
5528                 }
5529                 goto out;
5530         }
5531
5532         /* Want auto-negotiation.  */
5533         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5534
5535         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5536         if (flowctrl & ADVERTISE_1000XPAUSE)
5537                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5538         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5539                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
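        /* e.g. flowctrl == (FLOW_CTRL_TX | FLOW_CTRL_RX) advertises
         * symmetric pause only, so expected_sg_dig_ctrl becomes
         * SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP |
         * SG_DIG_PAUSE_CAP.
         */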
5540
5541         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5542                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5543                     tp->serdes_counter &&
5544                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5545                                     MAC_STATUS_RCVD_CFG)) ==
5546                      MAC_STATUS_PCS_SYNCED)) {
5547                         tp->serdes_counter--;
5548                         current_link_up = true;
5549                         goto out;
5550                 }
5551 restart_autoneg:
5552                 if (workaround)
5553                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5554                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5555                 udelay(5);
5556                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5557
5558                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5559                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5560         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5561                                  MAC_STATUS_SIGNAL_DET)) {
5562                 sg_dig_status = tr32(SG_DIG_STATUS);
5563                 mac_status = tr32(MAC_STATUS);
5564
5565                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5566                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5567                         u32 local_adv = 0, remote_adv = 0;
5568
5569                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5570                                 local_adv |= ADVERTISE_1000XPAUSE;
5571                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5572                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5573
5574                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5575                                 remote_adv |= LPA_1000XPAUSE;
5576                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5577                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5578
5579                         tp->link_config.rmt_adv =
5580                                            mii_adv_to_ethtool_adv_x(remote_adv);
5581
5582                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5583                         current_link_up = true;
5584                         tp->serdes_counter = 0;
5585                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5586                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5587                         if (tp->serdes_counter)
5588                                 tp->serdes_counter--;
5589                         else {
5590                                 if (workaround) {
5591                                         u32 val = serdes_cfg;
5592
5593                                         if (port_a)
5594                                                 val |= 0xc010000;
5595                                         else
5596                                                 val |= 0x4010000;
5597
5598                                         tw32_f(MAC_SERDES_CFG, val);
5599                                 }
5600
5601                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5602                                 udelay(40);
5603
5604                                 /* Link parallel detection: link is up only
5605                                  * if we have PCS_SYNC and are not receiving
5606                                  * config code words. */
5607                                 mac_status = tr32(MAC_STATUS);
5608                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5609                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5610                                         tg3_setup_flow_control(tp, 0, 0);
5611                                         current_link_up = true;
5612                                         tp->phy_flags |=
5613                                                 TG3_PHYFLG_PARALLEL_DETECT;
5614                                         tp->serdes_counter =
5615                                                 SERDES_PARALLEL_DET_TIMEOUT;
5616                                 } else
5617                                         goto restart_autoneg;
5618                         }
5619                 }
5620         } else {
5621                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5622                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5623         }
5624
5625 out:
5626         return current_link_up;
5627 }
5628
5629 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5630 {
5631         bool current_link_up = false;
5632
5633         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5634                 goto out;
5635
5636         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5637                 u32 txflags, rxflags;
5638                 int i;
5639
5640                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5641                         u32 local_adv = 0, remote_adv = 0;
5642
5643                         if (txflags & ANEG_CFG_PS1)
5644                                 local_adv |= ADVERTISE_1000XPAUSE;
5645                         if (txflags & ANEG_CFG_PS2)
5646                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5647
5648                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5649                                 remote_adv |= LPA_1000XPAUSE;
5650                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5651                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5652
5653                         tp->link_config.rmt_adv =
5654                                            mii_adv_to_ethtool_adv_x(remote_adv);
5655
5656                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5657
5658                         current_link_up = true;
5659                 }
5660                 for (i = 0; i < 30; i++) {
5661                         udelay(20);
5662                         tw32_f(MAC_STATUS,
5663                                (MAC_STATUS_SYNC_CHANGED |
5664                                 MAC_STATUS_CFG_CHANGED));
5665                         udelay(40);
5666                         if ((tr32(MAC_STATUS) &
5667                              (MAC_STATUS_SYNC_CHANGED |
5668                               MAC_STATUS_CFG_CHANGED)) == 0)
5669                                 break;
5670                 }
5671
5672                 mac_status = tr32(MAC_STATUS);
5673                 if (!current_link_up &&
5674                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5675                     !(mac_status & MAC_STATUS_RCVD_CFG))
5676                         current_link_up = true;
5677         } else {
5678                 tg3_setup_flow_control(tp, 0, 0);
5679
5680                 /* Forcing 1000FD link up. */
5681                 current_link_up = true;
5682
5683                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5684                 udelay(40);
5685
5686                 tw32_f(MAC_MODE, tp->mac_mode);
5687                 udelay(40);
5688         }
5689
5690 out:
5691         return current_link_up;
5692 }
5693
5694 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5695 {
5696         u32 orig_pause_cfg;
5697         u32 orig_active_speed;
5698         u8 orig_active_duplex;
5699         u32 mac_status;
5700         bool current_link_up;
5701         int i;
5702
5703         orig_pause_cfg = tp->link_config.active_flowctrl;
5704         orig_active_speed = tp->link_config.active_speed;
5705         orig_active_duplex = tp->link_config.active_duplex;
5706
5707         if (!tg3_flag(tp, HW_AUTONEG) &&
5708             tp->link_up &&
5709             tg3_flag(tp, INIT_COMPLETE)) {
5710                 mac_status = tr32(MAC_STATUS);
5711                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5712                                MAC_STATUS_SIGNAL_DET |
5713                                MAC_STATUS_CFG_CHANGED |
5714                                MAC_STATUS_RCVD_CFG);
5715                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5716                                    MAC_STATUS_SIGNAL_DET)) {
5717                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5718                                             MAC_STATUS_CFG_CHANGED));
5719                         return 0;
5720                 }
5721         }
5722
5723         tw32_f(MAC_TX_AUTO_NEG, 0);
5724
5725         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5726         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5727         tw32_f(MAC_MODE, tp->mac_mode);
5728         udelay(40);
5729
5730         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5731                 tg3_init_bcm8002(tp);
5732
5733         /* Enable link change events even when polling the serdes. */
5734         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5735         udelay(40);
5736
5737         tp->link_config.rmt_adv = 0;
5738         mac_status = tr32(MAC_STATUS);
5739
5740         if (tg3_flag(tp, HW_AUTONEG))
5741                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5742         else
5743                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5744
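        /* Clear the latched link-change bit in the status block (while
         * keeping SD_STATUS_UPDATED) so stale link events are not
         * re-processed for a change this function already handles.
         */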
5745         tp->napi[0].hw_status->status =
5746                 (SD_STATUS_UPDATED |
5747                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5748
5749         for (i = 0; i < 100; i++) {
5750                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5751                                     MAC_STATUS_CFG_CHANGED));
5752                 udelay(5);
5753                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5754                                          MAC_STATUS_CFG_CHANGED |
5755                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5756                         break;
5757         }
5758
5759         mac_status = tr32(MAC_STATUS);
5760         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5761                 current_link_up = false;
5762                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5763                     tp->serdes_counter == 0) {
5764                         tw32_f(MAC_MODE, (tp->mac_mode |
5765                                           MAC_MODE_SEND_CONFIGS));
5766                         udelay(1);
5767                         tw32_f(MAC_MODE, tp->mac_mode);
5768                 }
5769         }
5770
5771         if (current_link_up) {
5772                 tp->link_config.active_speed = SPEED_1000;
5773                 tp->link_config.active_duplex = DUPLEX_FULL;
5774                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5775                                     LED_CTRL_LNKLED_OVERRIDE |
5776                                     LED_CTRL_1000MBPS_ON));
5777         } else {
5778                 tp->link_config.active_speed = SPEED_UNKNOWN;
5779                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5780                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5781                                     LED_CTRL_LNKLED_OVERRIDE |
5782                                     LED_CTRL_TRAFFIC_OVERRIDE));
5783         }
5784
5785         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5786                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5787                 if (orig_pause_cfg != now_pause_cfg ||
5788                     orig_active_speed != tp->link_config.active_speed ||
5789                     orig_active_duplex != tp->link_config.active_duplex)
5790                         tg3_link_report(tp);
5791         }
5792
5793         return 0;
5794 }
5795
5796 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5797 {
5798         int err = 0;
5799         u32 bmsr, bmcr;
5800         u32 current_speed = SPEED_UNKNOWN;
5801         u8 current_duplex = DUPLEX_UNKNOWN;
5802         bool current_link_up = false;
5803         u32 local_adv, remote_adv, sgsr;
5804
5805         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5806              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5807              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5808              (sgsr & SERDES_TG3_SGMII_MODE)) {
5809
5810                 if (force_reset)
5811                         tg3_phy_reset(tp);
5812
5813                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5814
5815                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5816                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5817                 } else {
5818                         current_link_up = true;
5819                         if (sgsr & SERDES_TG3_SPEED_1000) {
5820                                 current_speed = SPEED_1000;
5821                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5822                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5823                                 current_speed = SPEED_100;
5824                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5825                         } else {
5826                                 current_speed = SPEED_10;
5827                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828                         }
5829
5830                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5831                                 current_duplex = DUPLEX_FULL;
5832                         else
5833                                 current_duplex = DUPLEX_HALF;
5834                 }
5835
5836                 tw32_f(MAC_MODE, tp->mac_mode);
5837                 udelay(40);
5838
5839                 tg3_clear_mac_status(tp);
5840
5841                 goto fiber_setup_done;
5842         }
5843
5844         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5845         tw32_f(MAC_MODE, tp->mac_mode);
5846         udelay(40);
5847
5848         tg3_clear_mac_status(tp);
5849
5850         if (force_reset)
5851                 tg3_phy_reset(tp);
5852
5853         tp->link_config.rmt_adv = 0;
5854
5855         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5857         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5858                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5859                         bmsr |= BMSR_LSTATUS;
5860                 else
5861                         bmsr &= ~BMSR_LSTATUS;
5862         }
5863
5864         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5865
5866         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5867             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5868                 /* do nothing, just check for link up at the end */
5869         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5870                 u32 adv, newadv;
5871
5872                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5873                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5874                                  ADVERTISE_1000XPAUSE |
5875                                  ADVERTISE_1000XPSE_ASYM |
5876                                  ADVERTISE_SLCT);
5877
5878                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5879                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5880
5881                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5882                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5883                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5884                         tg3_writephy(tp, MII_BMCR, bmcr);
5885
5886                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5887                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5888                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5889
5890                         return err;
5891                 }
5892         } else {
5893                 u32 new_bmcr;
5894
5895                 bmcr &= ~BMCR_SPEED1000;
5896                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5897
5898                 if (tp->link_config.duplex == DUPLEX_FULL)
5899                         new_bmcr |= BMCR_FULLDPLX;
5900
5901                 if (new_bmcr != bmcr) {
5902                         /* BMCR_SPEED1000 is a reserved bit that needs
5903                          * to be set on write.
5904                          */
5905                         new_bmcr |= BMCR_SPEED1000;
5906
5907                         /* Force a linkdown */
5908                         if (tp->link_up) {
5909                                 u32 adv;
5910
5911                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5912                                 adv &= ~(ADVERTISE_1000XFULL |
5913                                          ADVERTISE_1000XHALF |
5914                                          ADVERTISE_SLCT);
5915                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5916                                 tg3_writephy(tp, MII_BMCR, bmcr |
5917                                                            BMCR_ANRESTART |
5918                                                            BMCR_ANENABLE);
5919                                 udelay(10);
5920                                 tg3_carrier_off(tp);
5921                         }
5922                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5923                         bmcr = new_bmcr;
5924                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5927                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5928                                         bmsr |= BMSR_LSTATUS;
5929                                 else
5930                                         bmsr &= ~BMSR_LSTATUS;
5931                         }
5932                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5933                 }
5934         }
5935
5936         if (bmsr & BMSR_LSTATUS) {
5937                 current_speed = SPEED_1000;
5938                 current_link_up = true;
5939                 if (bmcr & BMCR_FULLDPLX)
5940                         current_duplex = DUPLEX_FULL;
5941                 else
5942                         current_duplex = DUPLEX_HALF;
5943
5944                 local_adv = 0;
5945                 remote_adv = 0;
5946
5947                 if (bmcr & BMCR_ANENABLE) {
5948                         u32 common;
5949
5950                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952                         common = local_adv & remote_adv;
5953                         if (common & (ADVERTISE_1000XHALF |
5954                                       ADVERTISE_1000XFULL)) {
5955                                 if (common & ADVERTISE_1000XFULL)
5956                                         current_duplex = DUPLEX_FULL;
5957                                 else
5958                                         current_duplex = DUPLEX_HALF;
5959
5960                                 tp->link_config.rmt_adv =
5961                                            mii_adv_to_ethtool_adv_x(remote_adv);
5962                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5963                                 /* Link is up via parallel detect */
5964                         } else {
5965                                 current_link_up = false;
5966                         }
5967                 }
5968         }
5969
5970 fiber_setup_done:
5971         if (current_link_up && current_duplex == DUPLEX_FULL)
5972                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5973
5974         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975         if (tp->link_config.active_duplex == DUPLEX_HALF)
5976                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5977
5978         tw32_f(MAC_MODE, tp->mac_mode);
5979         udelay(40);
5980
5981         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5982
5983         tp->link_config.active_speed = current_speed;
5984         tp->link_config.active_duplex = current_duplex;
5985
5986         tg3_test_and_report_link_chg(tp, current_link_up);
5987         return err;
5988 }
5989
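/* A short primer on the logic below: 1000BASE-X parallel detection
 * lets the link come up against a partner that is not sending autoneg
 * config code words.  Once the autoneg timer (serdes_counter) expires,
 * signal detect without config words means we force 1000/full by hand;
 * if config words appear later, autoneg is switched back on.
 */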
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5991 {
5992         if (tp->serdes_counter) {
5993                 /* Give autoneg time to complete. */
5994                 tp->serdes_counter--;
5995                 return;
5996         }
5997
5998         if (!tp->link_up &&
5999             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6000                 u32 bmcr;
6001
6002                 tg3_readphy(tp, MII_BMCR, &bmcr);
6003                 if (bmcr & BMCR_ANENABLE) {
6004                         u32 phy1, phy2;
6005
6006                         /* Select shadow register 0x1f */
6007                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6009
6010                         /* Select expansion interrupt status register */
6011                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012                                          MII_TG3_DSP_EXP1_INT_STAT);
6013                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015
6016                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017                                 /* We have signal detect and are not
6018                                  * receiving config code words; the link
6019                                  * is up by parallel detection.
6020                                  */
6021
6022                                 bmcr &= ~BMCR_ANENABLE;
6023                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024                                 tg3_writephy(tp, MII_BMCR, bmcr);
6025                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6026                         }
6027                 }
6028         } else if (tp->link_up &&
6029                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6031                 u32 phy2;
6032
6033                 /* Select expansion interrupt status register */
6034                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035                                  MII_TG3_DSP_EXP1_INT_STAT);
6036                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037                 if (phy2 & 0x20) {
6038                         u32 bmcr;
6039
6040                         /* Config code words received, turn on autoneg. */
6041                         tg3_readphy(tp, MII_BMCR, &bmcr);
6042                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6043
6044                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6045
6046                 }
6047         }
6048 }
6049
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6051 {
6052         u32 val;
6053         int err;
6054
6055         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056                 err = tg3_setup_fiber_phy(tp, force_reset);
6057         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6059         else
6060                 err = tg3_setup_copper_phy(tp, force_reset);
6061
6062         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6063                 u32 scale;
6064
6065                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6067                         scale = 65;
6068                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6069                         scale = 6;
6070                 else
6071                         scale = 12;
6072
6073                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075                 tw32(GRC_MISC_CFG, val);
6076         }
6077
6078         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079               (6 << TX_LENGTHS_IPG_SHIFT);
6080         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081             tg3_asic_rev(tp) == ASIC_REV_5762)
6082                 val |= tr32(MAC_TX_LENGTHS) &
6083                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6085
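        /* Half-duplex gigabit is programmed with a much larger slot
         * time (0xff), consistent with 802.3 carrier extension; all
         * other modes use the normal value of 32.
         */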
6086         if (tp->link_config.active_speed == SPEED_1000 &&
6087             tp->link_config.active_duplex == DUPLEX_HALF)
6088                 tw32(MAC_TX_LENGTHS, val |
6089                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6090         else
6091                 tw32(MAC_TX_LENGTHS, val |
6092                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6093
6094         if (!tg3_flag(tp, 5705_PLUS)) {
6095                 if (tp->link_up) {
6096                         tw32(HOSTCC_STAT_COAL_TICKS,
6097                              tp->coal.stats_block_coalesce_usecs);
6098                 } else {
6099                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6100                 }
6101         }
6102
6103         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104                 val = tr32(PCIE_PWR_MGMT_THRESH);
6105                 if (!tp->link_up)
6106                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6107                               tp->pwrmgmt_thresh;
6108                 else
6109                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110                 tw32(PCIE_PWR_MGMT_THRESH, val);
6111         }
6112
6113         return err;
6114 }
6115
6116 /* tp->lock must be held */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6118 {
6119         u64 stamp;
6120
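        /* The EAV reference clock is a free-running 64-bit nanosecond
         * counter split across two 32-bit registers.  The prets/postts
         * calls bracket the LSB read so the PTP core can pair this
         * device timestamp with system time.
         */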
6121         ptp_read_system_prets(sts);
6122         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123         ptp_read_system_postts(sts);
6124         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6125
6126         return stamp;
6127 }
6128
6129 /* tp->lock must be held */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6131 {
6132         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6133
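        /* Stop the counter across the two 32-bit writes so the chip
         * never snapshots a half-updated value, then resume it.
         */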
6134         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6138 }
6139
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
6142 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6143 {
6144         struct tg3 *tp = netdev_priv(dev);
6145
6146         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6147                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6148                                 SOF_TIMESTAMPING_SOFTWARE;
6149
6150         if (tg3_flag(tp, PTP_CAPABLE)) {
6151                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152                                         SOF_TIMESTAMPING_RX_HARDWARE |
6153                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6154         }
6155
6156         if (tp->ptp_clock)
6157                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6158         else
6159                 info->phc_index = -1;
6160
6161         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6162
6163         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6164                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6165                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6167         return 0;
6168 }
6169
6170 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6171 {
6172         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6173         bool neg_adj = false;
6174         u32 correction = 0;
6175
6176         if (ppb < 0) {
6177                 neg_adj = true;
6178                 ppb = -ppb;
6179         }
6180
6181         /* Frequency adjustment is performed using hardware with a 24-bit
6182          * accumulator and a programmable correction value. On each clock, the
6183          * correction value gets added to the accumulator and, when it
6184          * overflows, the time counter is incremented/decremented.
6185          *
6186          * So conversion from ppb to correction value is
6187          *              ppb * (1 << 24) / 1000000000
6188          */
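        /* Worked example: ppb = 1000 yields
         * 1000 * 16777216 / 1000000000 = 16 (truncated), i.e. the
         * counter gains one extra nanosecond every 2^24 / 16 clocks.
         */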
6189         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6190                      TG3_EAV_REF_CLK_CORRECT_MASK;
6191
6192         tg3_full_lock(tp, 0);
6193
6194         if (correction)
6195                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6196                      TG3_EAV_REF_CLK_CORRECT_EN |
6197                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6198         else
6199                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6200
6201         tg3_full_unlock(tp);
6202
6203         return 0;
6204 }
6205
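/* adjtime never steps the hardware counter; the delta simply
 * accumulates in tp->ptp_adjust, which gettimex and the rx/tx
 * timestamp conversion add back in.  Only settime rewrites the
 * hardware clock and zeroes the offset.
 */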
6206 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6207 {
6208         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6209
6210         tg3_full_lock(tp, 0);
6211         tp->ptp_adjust += delta;
6212         tg3_full_unlock(tp);
6213
6214         return 0;
6215 }
6216
6217 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6218                             struct ptp_system_timestamp *sts)
6219 {
6220         u64 ns;
6221         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223         tg3_full_lock(tp, 0);
6224         ns = tg3_refclk_read(tp, sts);
6225         ns += tp->ptp_adjust;
6226         tg3_full_unlock(tp);
6227
6228         *ts = ns_to_timespec64(ns);
6229
6230         return 0;
6231 }
6232
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234                            const struct timespec64 *ts)
6235 {
6236         u64 ns;
6237         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6238
6239         ns = timespec64_to_ns(ts);
6240
6241         tg3_full_lock(tp, 0);
6242         tg3_refclk_write(tp, ns);
6243         tp->ptp_adjust = 0;
6244         tg3_full_unlock(tp);
6245
6246         return 0;
6247 }
6248
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250                           struct ptp_clock_request *rq, int on)
6251 {
6252         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253         u32 clock_ctl;
6254         int rval = 0;
6255
6256         switch (rq->type) {
6257         case PTP_CLK_REQ_PEROUT:
6258                 /* Reject requests with unsupported flags */
6259                 if (rq->perout.flags)
6260                         return -EOPNOTSUPP;
6261
6262                 if (rq->perout.index != 0)
6263                         return -EINVAL;
6264
6265                 tg3_full_lock(tp, 0);
6266                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6267                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6268
6269                 if (on) {
6270                         u64 nsec;
6271
6272                         nsec = rq->perout.start.sec * 1000000000ULL +
6273                                rq->perout.start.nsec;
6274
6275                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6276                                 netdev_warn(tp->dev,
6277                                             "Device supports only a one-shot timesync output, period must be 0\n");
6278                                 rval = -EINVAL;
6279                                 goto err_out;
6280                         }
6281
6282                         if (nsec & (1ULL << 63)) {
6283                                 netdev_warn(tp->dev,
6284                                             "Start value (nsec) is over the limit; the start time must fit in 63 bits\n");
6285                                 rval = -EINVAL;
6286                                 goto err_out;
6287                         }
6288
6289                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6290                         tw32(TG3_EAV_WATCHDOG0_MSB,
6291                              TG3_EAV_WATCHDOG0_EN |
6292                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6293
6294                         tw32(TG3_EAV_REF_CLCK_CTL,
6295                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6296                 } else {
6297                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6298                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6299                 }
6300
6301 err_out:
6302                 tg3_full_unlock(tp);
6303                 return rval;
6304
6305         default:
6306                 break;
6307         }
6308
6309         return -EOPNOTSUPP;
6310 }
6311
6312 static const struct ptp_clock_info tg3_ptp_caps = {
6313         .owner          = THIS_MODULE,
6314         .name           = "tg3 clock",
6315         .max_adj        = 250000000,
6316         .n_alarm        = 0,
6317         .n_ext_ts       = 0,
6318         .n_per_out      = 1,
6319         .n_pins         = 0,
6320         .pps            = 0,
6321         .adjfreq        = tg3_ptp_adjfreq,
6322         .adjtime        = tg3_ptp_adjtime,
6323         .gettimex64     = tg3_ptp_gettimex,
6324         .settime64      = tg3_ptp_settime,
6325         .enable         = tg3_ptp_enable,
6326 };
6327
6328 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6329                                      struct skb_shared_hwtstamps *timestamp)
6330 {
6331         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6332         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6333                                            tp->ptp_adjust);
6334 }
6335
6336 /* tp->lock must be held */
6337 static void tg3_ptp_init(struct tg3 *tp)
6338 {
6339         if (!tg3_flag(tp, PTP_CAPABLE))
6340                 return;
6341
6342         /* Initialize the hardware clock to the system time. */
6343         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6344         tp->ptp_adjust = 0;
6345         tp->ptp_info = tg3_ptp_caps;
6346 }
6347
6348 /* tp->lock must be held */
6349 static void tg3_ptp_resume(struct tg3 *tp)
6350 {
6351         if (!tg3_flag(tp, PTP_CAPABLE))
6352                 return;
6353
6354         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6355         tp->ptp_adjust = 0;
6356 }
6357
6358 static void tg3_ptp_fini(struct tg3 *tp)
6359 {
6360         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6361                 return;
6362
6363         ptp_clock_unregister(tp->ptp_clock);
6364         tp->ptp_clock = NULL;
6365         tp->ptp_adjust = 0;
6366 }
6367
6368 static inline int tg3_irq_sync(struct tg3 *tp)
6369 {
6370         return tp->irq_sync;
6371 }
6372
6373 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6374 {
6375         int i;
6376
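        /* Advance dst by the register offset first, so each value
         * lands at regs[off / 4] and the scratch buffer mirrors the
         * chip's register map; all-zero gaps are skipped when printed.
         */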
6377         dst = (u32 *)((u8 *)dst + off);
6378         for (i = 0; i < len; i += sizeof(u32))
6379                 *dst++ = tr32(off + i);
6380 }
6381
6382 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6383 {
6384         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6385         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6386         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6387         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6388         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6389         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6390         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6391         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6392         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6393         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6394         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6395         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6396         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6397         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6398         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6399         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6400         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6401         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6402         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6403
6404         if (tg3_flag(tp, SUPPORT_MSIX))
6405                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6406
6407         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6408         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6409         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6410         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6411         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6412         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6413         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6414         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6415
6416         if (!tg3_flag(tp, 5705_PLUS)) {
6417                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6418                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6419                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6420         }
6421
6422         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6423         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6424         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6425         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6426         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6427
6428         if (tg3_flag(tp, NVRAM))
6429                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6430 }
6431
6432 static void tg3_dump_state(struct tg3 *tp)
6433 {
6434         int i;
6435         u32 *regs;
6436
6437         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6438         if (!regs)
6439                 return;
6440
6441         if (tg3_flag(tp, PCI_EXPRESS)) {
6442                 /* Read up to but not including private PCI registers */
6443                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6444                         regs[i / sizeof(u32)] = tr32(i);
6445         } else
6446                 tg3_dump_legacy_regs(tp, regs);
6447
6448         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6449                 if (!regs[i + 0] && !regs[i + 1] &&
6450                     !regs[i + 2] && !regs[i + 3])
6451                         continue;
6452
6453                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6454                            i * 4,
6455                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6456         }
6457
6458         kfree(regs);
6459
6460         for (i = 0; i < tp->irq_cnt; i++) {
6461                 struct tg3_napi *tnapi = &tp->napi[i];
6462
6463                 /* SW status block */
6464                 netdev_err(tp->dev,
6465                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6466                            i,
6467                            tnapi->hw_status->status,
6468                            tnapi->hw_status->status_tag,
6469                            tnapi->hw_status->rx_jumbo_consumer,
6470                            tnapi->hw_status->rx_consumer,
6471                            tnapi->hw_status->rx_mini_consumer,
6472                            tnapi->hw_status->idx[0].rx_producer,
6473                            tnapi->hw_status->idx[0].tx_consumer);
6474
6475                 netdev_err(tp->dev,
6476                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6477                            i,
6478                            tnapi->last_tag, tnapi->last_irq_tag,
6479                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6480                            tnapi->rx_rcb_ptr,
6481                            tnapi->prodring.rx_std_prod_idx,
6482                            tnapi->prodring.rx_std_cons_idx,
6483                            tnapi->prodring.rx_jmb_prod_idx,
6484                            tnapi->prodring.rx_jmb_cons_idx);
6485         }
6486 }
6487
6488 /* This is called whenever we suspect that the system chipset is re-
6489  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6490  * is bogus tx completions. We try to recover by setting the
6491  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6492  * in the workqueue.
6493  */
6494 static void tg3_tx_recover(struct tg3 *tp)
6495 {
6496         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6497                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6498
6499         netdev_warn(tp->dev,
6500                     "The system may be re-ordering memory-mapped I/O "
6501                     "cycles to the network device, attempting to recover. "
6502                     "Please report the problem to the driver maintainer "
6503                     "and include system chipset information.\n");
6504
6505         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6506 }
6507
6508 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6509 {
6510         /* Tell compiler to fetch tx indices from memory. */
6511         barrier();
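        /* Example with the 512-entry TX ring: tx_prod = 10 and
         * tx_cons = 500 gives (10 - 500) & 511 = 22 descriptors still
         * in flight, leaving tx_pending - 22 usable slots.
         */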
6512         return tnapi->tx_pending -
6513                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6514 }
6515
6516 /* Tigon3 never reports partial packet sends.  So we do not
6517  * need special logic to handle SKBs that have not had all
6518  * of their frags sent yet, like SunGEM does.
6519  */
6520 static void tg3_tx(struct tg3_napi *tnapi)
6521 {
6522         struct tg3 *tp = tnapi->tp;
6523         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6524         u32 sw_idx = tnapi->tx_cons;
6525         struct netdev_queue *txq;
6526         int index = tnapi - tp->napi;
6527         unsigned int pkts_compl = 0, bytes_compl = 0;
6528
6529         if (tg3_flag(tp, ENABLE_TSS))
6530                 index--;
6531
6532         txq = netdev_get_tx_queue(tp->dev, index);
6533
6534         while (sw_idx != hw_idx) {
6535                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6536                 struct sk_buff *skb = ri->skb;
6537                 int i, tx_bug = 0;
6538
6539                 if (unlikely(skb == NULL)) {
6540                         tg3_tx_recover(tp);
6541                         return;
6542                 }
6543
6544                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6545                         struct skb_shared_hwtstamps timestamp;
6546                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6547                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6548
6549                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6550
6551                         skb_tstamp_tx(skb, &timestamp);
6552                 }
6553
6554                 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6555                                  skb_headlen(skb), DMA_TO_DEVICE);
6556
6557                 ri->skb = NULL;
6558
6559                 while (ri->fragmented) {
6560                         ri->fragmented = false;
6561                         sw_idx = NEXT_TX(sw_idx);
6562                         ri = &tnapi->tx_buffers[sw_idx];
6563                 }
6564
6565                 sw_idx = NEXT_TX(sw_idx);
6566
6567                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6568                         ri = &tnapi->tx_buffers[sw_idx];
6569                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6570                                 tx_bug = 1;
6571
6572                         dma_unmap_page(&tp->pdev->dev,
6573                                        dma_unmap_addr(ri, mapping),
6574                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6575                                        DMA_TO_DEVICE);
6576
6577                         while (ri->fragmented) {
6578                                 ri->fragmented = false;
6579                                 sw_idx = NEXT_TX(sw_idx);
6580                                 ri = &tnapi->tx_buffers[sw_idx];
6581                         }
6582
6583                         sw_idx = NEXT_TX(sw_idx);
6584                 }
6585
6586                 pkts_compl++;
6587                 bytes_compl += skb->len;
6588
6589                 dev_consume_skb_any(skb);
6590
6591                 if (unlikely(tx_bug)) {
6592                         tg3_tx_recover(tp);
6593                         return;
6594                 }
6595         }
6596
6597         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6598
6599         tnapi->tx_cons = sw_idx;
6600
6601         /* Need to make the tx_cons update visible to tg3_start_xmit()
6602          * before checking for netif_queue_stopped().  Without the
6603          * memory barrier, there is a small possibility that tg3_start_xmit()
6604          * will miss it and cause the queue to be stopped forever.
6605          */
6606         smp_mb();
6607
6608         if (unlikely(netif_tx_queue_stopped(txq) &&
6609                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6610                 __netif_tx_lock(txq, smp_processor_id());
6611                 if (netif_tx_queue_stopped(txq) &&
6612                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6613                         netif_tx_wake_queue(txq);
6614                 __netif_tx_unlock(txq);
6615         }
6616 }
6617
6618 static void tg3_frag_free(bool is_frag, void *data)
6619 {
6620         if (is_frag)
6621                 skb_free_frag(data);
6622         else
6623                 kfree(data);
6624 }
6625
6626 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6627 {
6628         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6629                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6630
6631         if (!ri->data)
6632                 return;
6633
6634         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6635                          DMA_FROM_DEVICE);
6636         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6637         ri->data = NULL;
6638 }
6639
6640
6641 /* Returns the size of the data buffer allocated or < 0 on error.
6642  *
6643  * We only need to fill in the address because the other members
6644  * of the RX descriptor are invariant, see tg3_init_rings.
6645  *
6646  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6647  * posting buffers we only dirty the first cache line of the RX
6648  * descriptor (containing the address).  Whereas for the RX status
6649  * buffers the cpu only reads the last cacheline of the RX descriptor
6650  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6651  */
6652 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6653                              u32 opaque_key, u32 dest_idx_unmasked,
6654                              unsigned int *frag_size)
6655 {
6656         struct tg3_rx_buffer_desc *desc;
6657         struct ring_info *map;
6658         u8 *data;
6659         dma_addr_t mapping;
6660         int skb_size, data_size, dest_idx;
6661
6662         switch (opaque_key) {
6663         case RXD_OPAQUE_RING_STD:
6664                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6665                 desc = &tpr->rx_std[dest_idx];
6666                 map = &tpr->rx_std_buffers[dest_idx];
6667                 data_size = tp->rx_pkt_map_sz;
6668                 break;
6669
6670         case RXD_OPAQUE_RING_JUMBO:
6671                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6672                 desc = &tpr->rx_jmb[dest_idx].std;
6673                 map = &tpr->rx_jmb_buffers[dest_idx];
6674                 data_size = TG3_RX_JMB_MAP_SZ;
6675                 break;
6676
6677         default:
6678                 return -EINVAL;
6679         }
6680
6681         /* Do not overwrite any of the map or rp information
6682          * until we are sure we can commit to a new buffer.
6683          *
6684          * Callers depend upon this behavior and assume that
6685          * we leave everything unchanged if we fail.
6686          */
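        /* Size the buffer for a later build_skb(): payload plus
         * alignment slop plus a struct skb_shared_info tail.  Small
         * buffers come from the per-CPU page-frag cache; anything over
         * a page falls back to kmalloc (tg3_frag_free() mirrors this).
         */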
6687         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6688                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6689         if (skb_size <= PAGE_SIZE) {
6690                 data = napi_alloc_frag(skb_size);
6691                 *frag_size = skb_size;
6692         } else {
6693                 data = kmalloc(skb_size, GFP_ATOMIC);
6694                 *frag_size = 0;
6695         }
6696         if (!data)
6697                 return -ENOMEM;
6698
6699         mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6700                                  data_size, DMA_FROM_DEVICE);
6701         if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6702                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6703                 return -EIO;
6704         }
6705
6706         map->data = data;
6707         dma_unmap_addr_set(map, mapping, mapping);
6708
6709         desc->addr_hi = ((u64)mapping >> 32);
6710         desc->addr_lo = ((u64)mapping & 0xffffffff);
6711
6712         return data_size;
6713 }
6714
6715 /* We only need to move the address over because the other
6716  * members of the RX descriptor are invariant.  See notes above
6717  * tg3_alloc_rx_data for full details.
6718  */
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720                            struct tg3_rx_prodring_set *dpr,
6721                            u32 opaque_key, int src_idx,
6722                            u32 dest_idx_unmasked)
6723 {
6724         struct tg3 *tp = tnapi->tp;
6725         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726         struct ring_info *src_map, *dest_map;
6727         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6728         int dest_idx;
6729
6730         switch (opaque_key) {
6731         case RXD_OPAQUE_RING_STD:
6732                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733                 dest_desc = &dpr->rx_std[dest_idx];
6734                 dest_map = &dpr->rx_std_buffers[dest_idx];
6735                 src_desc = &spr->rx_std[src_idx];
6736                 src_map = &spr->rx_std_buffers[src_idx];
6737                 break;
6738
6739         case RXD_OPAQUE_RING_JUMBO:
6740                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743                 src_desc = &spr->rx_jmb[src_idx].std;
6744                 src_map = &spr->rx_jmb_buffers[src_idx];
6745                 break;
6746
6747         default:
6748                 return;
6749         }
6750
6751         dest_map->data = src_map->data;
6752         dma_unmap_addr_set(dest_map, mapping,
6753                            dma_unmap_addr(src_map, mapping));
6754         dest_desc->addr_hi = src_desc->addr_hi;
6755         dest_desc->addr_lo = src_desc->addr_lo;
6756
6757         /* Ensure that the update to the skb happens after the physical
6758          * addresses have been transferred to the new BD location.
6759          */
6760         smp_wmb();
6761
6762         src_map->data = NULL;
6763 }
6764
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766  * buffers to the chip, and one special ring the chip uses to report
6767  * status back to the host.
6768  *
6769  * The special ring reports the status of received packets to the
6770  * host.  The chip does not write into the original descriptor the
6771  * RX buffer was obtained from.  The chip simply takes the original
6772  * descriptor as provided by the host, updates the status and length
6773  * field, then writes this into the next status ring entry.
6774  *
6775  * Each ring the host uses to post buffers to the chip is described
6776  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6777  * it is first placed into the on-chip RAM.  When the packet's length
6778  * is known, the chip walks down the TG3_BDINFO entries to select the
6779  * ring.  The first TG3_BDINFO whose MAXLEN field covers the new
6780  * packet's length is chosen.
6781  *
6782  * The "separate ring for rx status" scheme may sound queer, but it makes
6783  * sense from a cache coherency perspective.  If only the host writes
6784  * to the buffer post rings, and only the chip writes to the rx status
6785  * rings, then cache lines never move beyond shared-modified state.
6786  * If both the host and chip were to write into the same ring, cache line
6787  * eviction could occur since both entities want it in an exclusive state.
6788  */
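/* In short:
 *
 *   host  --(std/jumbo producer rings)-->  chip
 *   chip  --(rx return ring + mailbox)-->  host
 *
 * tg3_rx() below drains the return ring, replaces or recycles each
 * data buffer, and republishes the producer indices via the RX
 * mailboxes.
 */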
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6790 {
6791         struct tg3 *tp = tnapi->tp;
6792         u32 work_mask, rx_std_posted = 0;
6793         u32 std_prod_idx, jmb_prod_idx;
6794         u32 sw_idx = tnapi->rx_rcb_ptr;
6795         u16 hw_idx;
6796         int received;
6797         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6798
6799         hw_idx = *(tnapi->rx_rcb_prod_idx);
6800         /*
6801          * We need to order the read of hw_idx and the read of
6802          * the opaque cookie.
6803          */
6804         rmb();
6805         work_mask = 0;
6806         received = 0;
6807         std_prod_idx = tpr->rx_std_prod_idx;
6808         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809         while (sw_idx != hw_idx && budget > 0) {
6810                 struct ring_info *ri;
6811                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6812                 unsigned int len;
6813                 struct sk_buff *skb;
6814                 dma_addr_t dma_addr;
6815                 u32 opaque_key, desc_idx, *post_ptr;
6816                 u8 *data;
6817                 u64 tstamp = 0;
6818
6819                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823                         dma_addr = dma_unmap_addr(ri, mapping);
6824                         data = ri->data;
6825                         post_ptr = &std_prod_idx;
6826                         rx_std_posted++;
6827                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829                         dma_addr = dma_unmap_addr(ri, mapping);
6830                         data = ri->data;
6831                         post_ptr = &jmb_prod_idx;
6832                 } else
6833                         goto next_pkt_nopost;
6834
6835                 work_mask |= opaque_key;
6836
6837                 if (desc->err_vlan & RXD_ERR_MASK) {
6838                 drop_it:
6839                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6840                                        desc_idx, *post_ptr);
6841                 drop_it_no_recycle:
6842                         /* Other statistics are kept track of by the card. */
6843                         tp->rx_dropped++;
6844                         goto next_pkt;
6845                 }
6846
6847                 prefetch(data + TG3_RX_OFFSET(tp));
6848                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6849                       ETH_FCS_LEN;
6850
6851                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852                      RXD_FLAG_PTPSTAT_PTPV1 ||
6853                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854                      RXD_FLAG_PTPSTAT_PTPV2) {
6855                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6857                 }
6858
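                /* Copy-break: packets above TG3_RX_COPY_THRESH take
                 * the zero-copy path (hand the DMA buffer to
                 * build_skb() and post a fresh replacement); shorter
                 * packets are copied into a new skb so the original
                 * buffer can be recycled in place.
                 */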
6859                 if (len > TG3_RX_COPY_THRESH(tp)) {
6860                         int skb_size;
6861                         unsigned int frag_size;
6862
6863                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864                                                     *post_ptr, &frag_size);
6865                         if (skb_size < 0)
6866                                 goto drop_it;
6867
6868                         dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6869                                          DMA_FROM_DEVICE);
6870
6871                         /* Ensure that the update to the data happens
6872                          * after the usage of the old DMA mapping.
6873                          */
6874                         smp_wmb();
6875
6876                         ri->data = NULL;
6877
6878                         skb = build_skb(data, frag_size);
6879                         if (!skb) {
6880                                 tg3_frag_free(frag_size != 0, data);
6881                                 goto drop_it_no_recycle;
6882                         }
6883                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6884                 } else {
6885                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6886                                        desc_idx, *post_ptr);
6887
6888                         skb = netdev_alloc_skb(tp->dev,
6889                                                len + TG3_RAW_IP_ALIGN);
6890                         if (skb == NULL)
6891                                 goto drop_it_no_recycle;
6892
6893                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894                         dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6895                                                 DMA_FROM_DEVICE);
6896                         memcpy(skb->data,
6897                                data + TG3_RX_OFFSET(tp),
6898                                len);
6899                         dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6900                                                    len, DMA_FROM_DEVICE);
6901                 }
6902
6903                 skb_put(skb, len);
6904                 if (tstamp)
6905                         tg3_hwclock_to_timestamp(tp, tstamp,
6906                                                  skb_hwtstamps(skb));
6907
6908                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6909                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6910                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6911                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6912                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6913                 else
6914                         skb_checksum_none_assert(skb);
6915
6916                 skb->protocol = eth_type_trans(skb, tp->dev);
6917
6918                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6919                     skb->protocol != htons(ETH_P_8021Q) &&
6920                     skb->protocol != htons(ETH_P_8021AD)) {
6921                         dev_kfree_skb_any(skb);
6922                         goto drop_it_no_recycle;
6923                 }
6924
6925                 if (desc->type_flags & RXD_FLAG_VLAN &&
6926                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6927                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6928                                                desc->err_vlan & RXD_VLAN_MASK);
6929
6930                 napi_gro_receive(&tnapi->napi, skb);
6931
6932                 received++;
6933                 budget--;
6934
6935 next_pkt:
6936                 (*post_ptr)++;
6937
6938                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6939                         tpr->rx_std_prod_idx = std_prod_idx &
6940                                                tp->rx_std_ring_mask;
6941                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6942                                      tpr->rx_std_prod_idx);
6943                         work_mask &= ~RXD_OPAQUE_RING_STD;
6944                         rx_std_posted = 0;
6945                 }
6946 next_pkt_nopost:
6947                 sw_idx++;
6948                 sw_idx &= tp->rx_ret_ring_mask;
6949
6950                 /* Refresh hw_idx to see if there is new work */
6951                 if (sw_idx == hw_idx) {
6952                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6953                         rmb();
6954                 }
6955         }
6956
6957         /* ACK the status ring. */
6958         tnapi->rx_rcb_ptr = sw_idx;
6959         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6960
6961         /* Refill RX ring(s). */
6962         if (!tg3_flag(tp, ENABLE_RSS)) {
6963                 /* Sync BD data before updating mailbox */
6964                 wmb();
6965
6966                 if (work_mask & RXD_OPAQUE_RING_STD) {
6967                         tpr->rx_std_prod_idx = std_prod_idx &
6968                                                tp->rx_std_ring_mask;
6969                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6970                                      tpr->rx_std_prod_idx);
6971                 }
6972                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6973                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6974                                                tp->rx_jmb_ring_mask;
6975                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6976                                      tpr->rx_jmb_prod_idx);
6977                 }
6978         } else if (work_mask) {
6979                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6980                  * updated before the producer indices can be updated.
6981                  */
6982                 smp_wmb();
6983
6984                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6985                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6986
6987                 if (tnapi != &tp->napi[1]) {
6988                         tp->rx_refill = true;
6989                         napi_schedule(&tp->napi[1].napi);
6990                 }
6991         }
6992
6993         return received;
6994 }
6995
6996 static void tg3_poll_link(struct tg3 *tp)
6997 {
6998         /* handle link change and other phy events */
6999         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7000                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7001
7002                 if (sblk->status & SD_STATUS_LINK_CHG) {
7003                         sblk->status = SD_STATUS_UPDATED |
7004                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7005                         spin_lock(&tp->lock);
7006                         if (tg3_flag(tp, USE_PHYLIB)) {
7007                                 tw32_f(MAC_STATUS,
7008                                      (MAC_STATUS_SYNC_CHANGED |
7009                                       MAC_STATUS_CFG_CHANGED |
7010                                       MAC_STATUS_MI_COMPLETION |
7011                                       MAC_STATUS_LNKSTATE_CHANGED));
7012                                 udelay(40);
7013                         } else
7014                                 tg3_setup_phy(tp, false);
7015                         spin_unlock(&tp->lock);
7016                 }
7017         }
7018 }
7019
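/* With RSS, the hardware consumes buffers from napi[0]'s producer
 * rings only, while each RX vector refills into its own shadow ring
 * set (see tg3_poll_work()).  This helper migrates the refilled
 * buffers from a source set into the destination set, bailing out
 * with -ENOSPC when a destination slot is still occupied.
 */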
7020 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7021                                 struct tg3_rx_prodring_set *dpr,
7022                                 struct tg3_rx_prodring_set *spr)
7023 {
7024         u32 si, di, cpycnt, src_prod_idx;
7025         int i, err = 0;
7026
7027         while (1) {
7028                 src_prod_idx = spr->rx_std_prod_idx;
7029
7030                 /* Make sure updates to the rx_std_buffers[] entries and the
7031                  * standard producer index are seen in the correct order.
7032                  */
7033                 smp_rmb();
7034
7035                 if (spr->rx_std_cons_idx == src_prod_idx)
7036                         break;
7037
7038                 if (spr->rx_std_cons_idx < src_prod_idx)
7039                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7040                 else
7041                         cpycnt = tp->rx_std_ring_mask + 1 -
7042                                  spr->rx_std_cons_idx;
7043
7044                 cpycnt = min(cpycnt,
7045                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7046
7047                 si = spr->rx_std_cons_idx;
7048                 di = dpr->rx_std_prod_idx;
7049
7050                 for (i = di; i < di + cpycnt; i++) {
7051                         if (dpr->rx_std_buffers[i].data) {
7052                                 cpycnt = i - di;
7053                                 err = -ENOSPC;
7054                                 break;
7055                         }
7056                 }
7057
7058                 if (!cpycnt)
7059                         break;
7060
7061                 /* Ensure that updates to the rx_std_buffers ring and the
7062                  * shadowed hardware producer ring from tg3_recycle_skb() are
7063                  * ordered correctly WRT the skb check above.
7064                  */
7065                 smp_rmb();
7066
7067                 memcpy(&dpr->rx_std_buffers[di],
7068                        &spr->rx_std_buffers[si],
7069                        cpycnt * sizeof(struct ring_info));
7070
7071                 for (i = 0; i < cpycnt; i++, di++, si++) {
7072                         struct tg3_rx_buffer_desc *sbd, *dbd;
7073                         sbd = &spr->rx_std[si];
7074                         dbd = &dpr->rx_std[di];
7075                         dbd->addr_hi = sbd->addr_hi;
7076                         dbd->addr_lo = sbd->addr_lo;
7077                 }
7078
7079                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7080                                        tp->rx_std_ring_mask;
7081                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7082                                        tp->rx_std_ring_mask;
7083         }
7084
7085         while (1) {
7086                 src_prod_idx = spr->rx_jmb_prod_idx;
7087
7088                 /* Make sure updates to the rx_jmb_buffers[] entries and
7089                  * the jumbo producer index are seen in the correct order.
7090                  */
7091                 smp_rmb();
7092
7093                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7094                         break;
7095
7096                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7097                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7098                 else
7099                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7100                                  spr->rx_jmb_cons_idx;
7101
7102                 cpycnt = min(cpycnt,
7103                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7104
7105                 si = spr->rx_jmb_cons_idx;
7106                 di = dpr->rx_jmb_prod_idx;
7107
7108                 for (i = di; i < di + cpycnt; i++) {
7109                         if (dpr->rx_jmb_buffers[i].data) {
7110                                 cpycnt = i - di;
7111                                 err = -ENOSPC;
7112                                 break;
7113                         }
7114                 }
7115
7116                 if (!cpycnt)
7117                         break;
7118
7119                 /* Ensure that updates to the rx_jmb_buffers ring and the
7120                  * shadowed hardware producer ring from tg3_recycle_skb() are
7121                  * ordered correctly WRT the skb check above.
7122                  */
7123                 smp_rmb();
7124
7125                 memcpy(&dpr->rx_jmb_buffers[di],
7126                        &spr->rx_jmb_buffers[si],
7127                        cpycnt * sizeof(struct ring_info));
7128
7129                 for (i = 0; i < cpycnt; i++, di++, si++) {
7130                         struct tg3_rx_buffer_desc *sbd, *dbd;
7131                         sbd = &spr->rx_jmb[si].std;
7132                         dbd = &dpr->rx_jmb[di].std;
7133                         dbd->addr_hi = sbd->addr_hi;
7134                         dbd->addr_lo = sbd->addr_lo;
7135                 }
7136
7137                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7138                                        tp->rx_jmb_ring_mask;
7139                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7140                                        tp->rx_jmb_ring_mask;
7141         }
7142
7143         return err;
7144 }
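
/* Editorial note: the index arithmetic in tg3_rx_prodring_xfer() relies
 * on ring sizes being powers of two, so (idx + n) & mask is a cheap
 * equivalent of (idx + n) % ring_size with mask == ring_size - 1.  For
 * example, with a 512-entry ring (mask 0x1ff), advancing index 510 by
 * 4 entries gives (510 + 4) & 0x1ff == 2, wrapping to the ring start.
 */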
7145
7146 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7147 {
7148         struct tg3 *tp = tnapi->tp;
7149
7150         /* run TX completion thread */
7151         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7152                 tg3_tx(tnapi);
7153                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7154                         return work_done;
7155         }
7156
7157         if (!tnapi->rx_rcb_prod_idx)
7158                 return work_done;
7159
7160         /* run RX thread, within the bounds set by NAPI.
7161          * All RX "locking" is done by ensuring outside
7162          * code synchronizes with tg3->napi.poll()
7163          */
7164         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7165                 work_done += tg3_rx(tnapi, budget - work_done);
7166
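
        /* Editorial note: with RSS enabled, the second NAPI vector
         * (tp->napi[1]) also refills the single hardware-visible
         * producer ring, owned by tp->napi[0], with buffers recycled by
         * every rx vector (via tg3_rx_prodring_xfer() below), and then
         * kicks the standard/jumbo producer mailboxes if anything moved.
         */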
7167         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7168                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7169                 int i, err = 0;
7170                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7171                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7172
7173                 tp->rx_refill = false;
7174                 for (i = 1; i <= tp->rxq_cnt; i++)
7175                         err |= tg3_rx_prodring_xfer(tp, dpr,
7176                                                     &tp->napi[i].prodring);
7177
7178                 wmb();
7179
7180                 if (std_prod_idx != dpr->rx_std_prod_idx)
7181                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7182                                      dpr->rx_std_prod_idx);
7183
7184                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7185                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7186                                      dpr->rx_jmb_prod_idx);
7187
7188                 if (err)
7189                         tw32_f(HOSTCC_MODE, tp->coal_now);
7190         }
7191
7192         return work_done;
7193 }
7194
7195 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7196 {
7197         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7198                 schedule_work(&tp->reset_task);
7199 }
7200
7201 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7202 {
7203         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204                 cancel_work_sync(&tp->reset_task);
7205         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7206 }
7207
7208 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7209 {
7210         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7211         struct tg3 *tp = tnapi->tp;
7212         int work_done = 0;
7213         struct tg3_hw_status *sblk = tnapi->hw_status;
7214
7215         while (1) {
7216                 work_done = tg3_poll_work(tnapi, work_done, budget);
7217
7218                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7219                         goto tx_recovery;
7220
7221                 if (unlikely(work_done >= budget))
7222                         break;
7223
7224                 /* tp->last_tag is used in tg3_int_reenable() below
7225                  * to tell the hw how much work has been processed,
7226                  * so we must read it before checking for more work.
7227                  */
7228                 tnapi->last_tag = sblk->status_tag;
7229                 tnapi->last_irq_tag = tnapi->last_tag;
7230                 rmb();
7231
7232                 /* check for RX/TX work to do */
7233                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7234                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7235
7236                         /* This check is not race-free, but looping
7237                          * again reduces the number of interrupts.
7238                          */
7239                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7240                                 continue;
7241
7242                         napi_complete_done(napi, work_done);
7243                         /* Reenable interrupts. */
7244                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7245
7246                         /* This check is synchronized by napi_schedule()
7247                          * and napi_complete() to close the race window.
7248                          */
7249                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7250                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7251                                                   HOSTCC_MODE_ENABLE |
7252                                                   tnapi->coal_now);
7253                         }
7254                         break;
7255                 }
7256         }
7257
7258         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7259         return work_done;
7260
7261 tx_recovery:
7262         /* work_done is guaranteed to be less than budget. */
7263         napi_complete(napi);
7264         tg3_reset_task_schedule(tp);
7265         return work_done;
7266 }
7267
7268 static void tg3_process_error(struct tg3 *tp)
7269 {
7270         u32 val;
7271         bool real_error = false;
7272
7273         if (tg3_flag(tp, ERROR_PROCESSED))
7274                 return;
7275
7276         /* Check Flow Attention register */
7277         val = tr32(HOSTCC_FLOW_ATTN);
7278         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7279                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7280                 real_error = true;
7281         }
7282
7283         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7284                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7285                 real_error = true;
7286         }
7287
7288         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7289                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7290                 real_error = true;
7291         }
7292
7293         if (!real_error)
7294                 return;
7295
7296         tg3_dump_state(tp);
7297
7298         tg3_flag_set(tp, ERROR_PROCESSED);
7299         tg3_reset_task_schedule(tp);
7300 }
7301
7302 static int tg3_poll(struct napi_struct *napi, int budget)
7303 {
7304         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7305         struct tg3 *tp = tnapi->tp;
7306         int work_done = 0;
7307         struct tg3_hw_status *sblk = tnapi->hw_status;
7308
7309         while (1) {
7310                 if (sblk->status & SD_STATUS_ERROR)
7311                         tg3_process_error(tp);
7312
7313                 tg3_poll_link(tp);
7314
7315                 work_done = tg3_poll_work(tnapi, work_done, budget);
7316
7317                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7318                         goto tx_recovery;
7319
7320                 if (unlikely(work_done >= budget))
7321                         break;
7322
7323                 if (tg3_flag(tp, TAGGED_STATUS)) {
7324                         /* tp->last_tag is used in tg3_int_reenable() below
7325                          * to tell the hw how much work has been processed,
7326                          * so we must read it before checking for more work.
7327                          */
7328                         tnapi->last_tag = sblk->status_tag;
7329                         tnapi->last_irq_tag = tnapi->last_tag;
7330                         rmb();
7331                 } else
7332                         sblk->status &= ~SD_STATUS_UPDATED;
7333
7334                 if (likely(!tg3_has_work(tnapi))) {
7335                         napi_complete_done(napi, work_done);
7336                         tg3_int_reenable(tnapi);
7337                         break;
7338                 }
7339         }
7340
7341         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7342         return work_done;
7343
7344 tx_recovery:
7345         /* work_done is guaranteed to be less than budget. */
7346         napi_complete(napi);
7347         tg3_reset_task_schedule(tp);
7348         return work_done;
7349 }
7350
7351 static void tg3_napi_disable(struct tg3 *tp)
7352 {
7353         int i;
7354
7355         for (i = tp->irq_cnt - 1; i >= 0; i--)
7356                 napi_disable(&tp->napi[i].napi);
7357 }
7358
7359 static void tg3_napi_enable(struct tg3 *tp)
7360 {
7361         int i;
7362
7363         for (i = 0; i < tp->irq_cnt; i++)
7364                 napi_enable(&tp->napi[i].napi);
7365 }
7366
7367 static void tg3_napi_init(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372         for (i = 1; i < tp->irq_cnt; i++)
7373                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7374 }
7375
7376 static void tg3_napi_fini(struct tg3 *tp)
7377 {
7378         int i;
7379
7380         for (i = 0; i < tp->irq_cnt; i++)
7381                 netif_napi_del(&tp->napi[i].napi);
7382 }
7383
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7385 {
7386         netif_trans_update(tp->dev);    /* prevent tx timeout */
7387         tg3_napi_disable(tp);
7388         netif_carrier_off(tp->dev);
7389         netif_tx_disable(tp->dev);
7390 }
7391
7392 /* tp->lock must be held */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7394 {
7395         tg3_ptp_resume(tp);
7396
7397         /* NOTE: unconditional netif_tx_wake_all_queues is only
7398          * appropriate so long as all callers are assured to
7399          * have free tx slots (such as after tg3_init_hw)
7400          */
7401         netif_tx_wake_all_queues(tp->dev);
7402
7403         if (tp->link_up)
7404                 netif_carrier_on(tp->dev);
7405
7406         tg3_napi_enable(tp);
7407         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408         tg3_enable_ints(tp);
7409 }
7410
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412         __releases(tp->lock)
7413         __acquires(tp->lock)
7414 {
7415         int i;
7416
7417         BUG_ON(tp->irq_sync);
7418
7419         tp->irq_sync = 1;
7420         smp_mb();
7421
7422         spin_unlock_bh(&tp->lock);
7423
7424         for (i = 0; i < tp->irq_cnt; i++)
7425                 synchronize_irq(tp->napi[i].irq_vec);
7426
7427         spin_lock_bh(&tp->lock);
7428 }
7429
7430 /* Fully shut down all tg3 driver activity elsewhere in the system.
7431  * If irq_sync is non-zero, the IRQ handlers must be synchronized
7432  * with as well.  This is usually only necessary when shutting down
7433  * the device.
7434  */
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7436 {
7437         spin_lock_bh(&tp->lock);
7438         if (irq_sync)
7439                 tg3_irq_quiesce(tp);
7440 }
7441
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7443 {
7444         spin_unlock_bh(&tp->lock);
7445 }
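
/* Editorial sketch (hypothetical caller, not part of the driver): the
 * helpers above are meant to bracket hardware reconfiguration.  Passing
 * irq_sync == 1 additionally waits for in-flight interrupt handlers via
 * tg3_irq_quiesce() before the critical section is entered:
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram chip registers ...
 *	tg3_full_unlock(tp);
 */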
7446
7447 /* One-shot MSI handler - Chip automatically disables interrupt
7448  * after sending MSI so driver doesn't have to do it.
7449  */
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7451 {
7452         struct tg3_napi *tnapi = dev_id;
7453         struct tg3 *tp = tnapi->tp;
7454
7455         prefetch(tnapi->hw_status);
7456         if (tnapi->rx_rcb)
7457                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458
7459         if (likely(!tg3_irq_sync(tp)))
7460                 napi_schedule(&tnapi->napi);
7461
7462         return IRQ_HANDLED;
7463 }
7464
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466  * flush status block and interrupt mailbox. PCI ordering rules
7467  * guarantee that MSI will arrive after the status block.
7468  */
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7470 {
7471         struct tg3_napi *tnapi = dev_id;
7472         struct tg3 *tp = tnapi->tp;
7473
7474         prefetch(tnapi->hw_status);
7475         if (tnapi->rx_rcb)
7476                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7477         /*
7478          * Writing any value to intr-mbox-0 clears PCI INTA# and
7479          * chip-internal interrupt pending events.
7480          * Writing non-zero to intr-mbox-0 additionally tells the
7481          * NIC to stop sending us irqs, engaging "in-intr-handler"
7482          * event coalescing.
7483          */
7484         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7485         if (likely(!tg3_irq_sync(tp)))
7486                 napi_schedule(&tnapi->napi);
7487
7488         return IRQ_RETVAL(1);
7489 }
7490
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7492 {
7493         struct tg3_napi *tnapi = dev_id;
7494         struct tg3 *tp = tnapi->tp;
7495         struct tg3_hw_status *sblk = tnapi->hw_status;
7496         unsigned int handled = 1;
7497
7498         /* In INTx mode, it is possible for the interrupt to arrive at
7499          * the CPU before the status block that was posted prior to it.
7500          * Reading the PCI State register will confirm whether the
7501          * interrupt is ours and will flush the status block.
7502          */
7503         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504                 if (tg3_flag(tp, CHIP_RESETTING) ||
7505                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506                         handled = 0;
7507                         goto out;
7508                 }
7509         }
7510
7511         /*
7512          * Writing any value to intr-mbox-0 clears PCI INTA# and
7513          * chip-internal interrupt pending events.
7514          * Writing non-zero to intr-mbox-0 additionally tells the
7515          * NIC to stop sending us irqs, engaging "in-intr-handler"
7516          * event coalescing.
7517          *
7518          * Flush the mailbox to de-assert the IRQ immediately to prevent
7519          * spurious interrupts.  The flush impacts performance but
7520          * excessive spurious interrupts can be worse in some cases.
7521          */
7522         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523         if (tg3_irq_sync(tp))
7524                 goto out;
7525         sblk->status &= ~SD_STATUS_UPDATED;
7526         if (likely(tg3_has_work(tnapi))) {
7527                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528                 napi_schedule(&tnapi->napi);
7529         } else {
7530                 /* No work, shared interrupt perhaps?  Re-enable
7531                  * interrupts, and flush that PCI write.
7532                  */
7533                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7534                                0x00000000);
7535         }
7536 out:
7537         return IRQ_RETVAL(handled);
7538 }
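
/* Editorial summary (restating the comments above): the interrupt
 * mailbox doubles as ack and mask.  Writing 1 acks INTA# and masks
 * further irqs for the duration of the handler; writing 0 acks and
 * re-enables them.  The _f variants flush the posted PCI write
 * (typically by reading the mailbox back) so the de-assert reaches
 * the chip promptly:
 *
 *	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 *	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
 */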
7539
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7541 {
7542         struct tg3_napi *tnapi = dev_id;
7543         struct tg3 *tp = tnapi->tp;
7544         struct tg3_hw_status *sblk = tnapi->hw_status;
7545         unsigned int handled = 1;
7546
7547         /* In INTx mode, it is possible for the interrupt to arrive at
7548          * the CPU before the status block that was posted prior to it.
7549          * Reading the PCI State register will confirm whether the
7550          * interrupt is ours and will flush the status block.
7551          */
7552         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553                 if (tg3_flag(tp, CHIP_RESETTING) ||
7554                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555                         handled = 0;
7556                         goto out;
7557                 }
7558         }
7559
7560         /*
7561          * Writing any value to intr-mbox-0 clears PCI INTA# and
7562          * chip-internal interrupt pending events.
7563          * Writing non-zero to intr-mbox-0 additionally tells the
7564          * NIC to stop sending us irqs, engaging "in-intr-handler"
7565          * event coalescing.
7566          *
7567          * Flush the mailbox to de-assert the IRQ immediately to prevent
7568          * spurious interrupts.  The flush impacts performance but
7569          * excessive spurious interrupts can be worse in some cases.
7570          */
7571         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7572
7573         /*
7574          * In a shared interrupt configuration, sometimes other devices'
7575          * interrupts will scream.  We record the current status tag here
7576          * so that the above check can report that the screaming interrupts
7577          * are unhandled.  Eventually they will be silenced.
7578          */
7579         tnapi->last_irq_tag = sblk->status_tag;
7580
7581         if (tg3_irq_sync(tp))
7582                 goto out;
7583
7584         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7585
7586         napi_schedule(&tnapi->napi);
7587
7588 out:
7589         return IRQ_RETVAL(handled);
7590 }
7591
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7594 {
7595         struct tg3_napi *tnapi = dev_id;
7596         struct tg3 *tp = tnapi->tp;
7597         struct tg3_hw_status *sblk = tnapi->hw_status;
7598
7599         if ((sblk->status & SD_STATUS_UPDATED) ||
7600             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601                 tg3_disable_ints(tp);
7602                 return IRQ_RETVAL(1);
7603         }
7604         return IRQ_RETVAL(0);
7605 }
7606
7607 #ifdef CONFIG_NET_POLL_CONTROLLER
7608 static void tg3_poll_controller(struct net_device *dev)
7609 {
7610         int i;
7611         struct tg3 *tp = netdev_priv(dev);
7612
7613         if (tg3_irq_sync(tp))
7614                 return;
7615
7616         for (i = 0; i < tp->irq_cnt; i++)
7617                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7618 }
7619 #endif
7620
7621 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7622 {
7623         struct tg3 *tp = netdev_priv(dev);
7624
7625         if (netif_msg_tx_err(tp)) {
7626                 netdev_err(dev, "transmit timed out, resetting\n");
7627                 tg3_dump_state(tp);
7628         }
7629
7630         tg3_reset_task_schedule(tp);
7631 }
7632
7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7635 {
7636         u32 base = (u32) mapping & 0xffffffff;
7637
7638         return base + len + 8 < base;
7639 }
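
/* Editorial example: the test above flags a buffer whose end (plus an
 * 8-byte guard) lands on the far side of a 4GB boundary.  With
 * mapping == 0xfffffff0 and len == 0x20, base + len + 8 wraps to 0x18
 * in 32-bit arithmetic, and 0x18 < base, so the test returns true.
 */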
7640
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642  * of any 4GB boundaries: 4G, 8G, etc.
7643  */
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7645                                            u32 len, u32 mss)
7646 {
7647         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648                 u32 base = (u32) mapping & 0xffffffff;
7649
7650                 return ((base + len + (mss & 0x3fff)) < base);
7651         }
7652         return 0;
7653 }
7654
7655 /* Test for DMA addresses > 40-bit */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7657                                           int len)
7658 {
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7660         if (tg3_flag(tp, 40BIT_DMA_BUG))
7661                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7662         return 0;
7663 #else
7664         return 0;
7665 #endif
7666 }
7667
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669                                  dma_addr_t mapping, u32 len, u32 flags,
7670                                  u32 mss, u32 vlan)
7671 {
7672         txbd->addr_hi = ((u64) mapping >> 32);
7673         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7676 }
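
/* Editorial example (constant values are assumptions; see tg3.h for the
 * authoritative definitions): tg3_tx_set_bd() packs length and flags
 * into one 32-bit word and mss/vlan into another.  Assuming
 * TXD_LEN_SHIFT == 16 and TXD_FLAG_END == 0x0004, a 1514-byte final
 * fragment yields len_flags == (1514 << 16) | 0x0004.
 */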
7677
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679                             dma_addr_t map, u32 len, u32 flags,
7680                             u32 mss, u32 vlan)
7681 {
7682         struct tg3 *tp = tnapi->tp;
7683         bool hwbug = false;
7684
7685         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7686                 hwbug = true;
7687
7688         if (tg3_4g_overflow_test(map, len))
7689                 hwbug = true;
7690
7691         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7692                 hwbug = true;
7693
7694         if (tg3_40bit_overflow_test(tp, map, len))
7695                 hwbug = true;
7696
7697         if (tp->dma_limit) {
7698                 u32 prvidx = *entry;
7699                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700                 while (len > tp->dma_limit && *budget) {
7701                         u32 frag_len = tp->dma_limit;
7702                         len -= tp->dma_limit;
7703
7704                         /* Avoid the 8-byte DMA problem */
7705                         if (len <= 8) {
7706                                 len += tp->dma_limit / 2;
7707                                 frag_len = tp->dma_limit / 2;
7708                         }
7709
7710                         tnapi->tx_buffers[*entry].fragmented = true;
7711
7712                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713                                       frag_len, tmp_flag, mss, vlan);
7714                         *budget -= 1;
7715                         prvidx = *entry;
7716                         *entry = NEXT_TX(*entry);
7717
7718                         map += frag_len;
7719                 }
7720
7721                 if (len) {
7722                         if (*budget) {
7723                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724                                               len, flags, mss, vlan);
7725                                 *budget -= 1;
7726                                 *entry = NEXT_TX(*entry);
7727                         } else {
7728                                 hwbug = true;
7729                                 tnapi->tx_buffers[prvidx].fragmented = false;
7730                         }
7731                 }
7732         } else {
7733                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734                               len, flags, mss, vlan);
7735                 *entry = NEXT_TX(*entry);
7736         }
7737
7738         return hwbug;
7739 }
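
/* Editorial example: the dma_limit path in tg3_tx_frag_set() splits an
 * oversized fragment so no chunk ends up 8 bytes or smaller (cf. the
 * SHORT_DMA_BUG check above).  With dma_limit == 4096 and len == 4100,
 * a naive split would leave a 4-byte tail; instead the last full chunk
 * is halved, producing 2048 + 2052 as the final two descriptors.
 */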
7740
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7742 {
7743         int i;
7744         struct sk_buff *skb;
7745         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7746
7747         skb = txb->skb;
7748         txb->skb = NULL;
7749
7750         dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7751                          skb_headlen(skb), DMA_TO_DEVICE);
7752
7753         while (txb->fragmented) {
7754                 txb->fragmented = false;
7755                 entry = NEXT_TX(entry);
7756                 txb = &tnapi->tx_buffers[entry];
7757         }
7758
7759         for (i = 0; i <= last; i++) {
7760                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7761
7762                 entry = NEXT_TX(entry);
7763                 txb = &tnapi->tx_buffers[entry];
7764
7765                 dma_unmap_page(&tnapi->tp->pdev->dev,
7766                                dma_unmap_addr(txb, mapping),
7767                                skb_frag_size(frag), DMA_TO_DEVICE);
7768
7769                 while (txb->fragmented) {
7770                         txb->fragmented = false;
7771                         entry = NEXT_TX(entry);
7772                         txb = &tnapi->tx_buffers[entry];
7773                 }
7774         }
7775 }
7776
7777 /* Work around the 4GB-boundary and 40-bit hardware DMA bugs. */
7778 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7779                                        struct sk_buff **pskb,
7780                                        u32 *entry, u32 *budget,
7781                                        u32 base_flags, u32 mss, u32 vlan)
7782 {
7783         struct tg3 *tp = tnapi->tp;
7784         struct sk_buff *new_skb, *skb = *pskb;
7785         dma_addr_t new_addr = 0;
7786         int ret = 0;
7787
7788         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7789                 new_skb = skb_copy(skb, GFP_ATOMIC);
7790         else {
7791                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7792
7793                 new_skb = skb_copy_expand(skb,
7794                                           skb_headroom(skb) + more_headroom,
7795                                           skb_tailroom(skb), GFP_ATOMIC);
7796         }
7797
7798         if (!new_skb) {
7799                 ret = -1;
7800         } else {
7801                 /* New SKB is guaranteed to be linear. */
7802                 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7803                                           new_skb->len, DMA_TO_DEVICE);
7804                 /* Make sure the mapping succeeded */
7805                 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7806                         dev_kfree_skb_any(new_skb);
7807                         ret = -1;
7808                 } else {
7809                         u32 save_entry = *entry;
7810
7811                         base_flags |= TXD_FLAG_END;
7812
7813                         tnapi->tx_buffers[*entry].skb = new_skb;
7814                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7815                                            mapping, new_addr);
7816
7817                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7818                                             new_skb->len, base_flags,
7819                                             mss, vlan)) {
7820                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7821                                 dev_kfree_skb_any(new_skb);
7822                                 ret = -1;
7823                         }
7824                 }
7825         }
7826
7827         dev_consume_skb_any(skb);
7828         *pskb = new_skb;
7829         return ret;
7830 }
7831
7832 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7833 {
7834         /* Check if we will never have enough descriptors,
7835          * as gso_segs can exceed the current ring size.
7836          */
7837         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7838 }
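
/* Editorial note: the divisor of 3 mirrors the worst-case estimate in
 * tg3_tso_bug() below, which reserves roughly three tx descriptors per
 * GSO segment (frag_cnt_est = gso_segs * 3).
 */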
7839
7840 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7841
7842 /* Use GSO to work around TSO packets that meet the HW bug conditions
7843  * indicated in tg3_tx_frag_set()
7844  */
7845 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7846                        struct netdev_queue *txq, struct sk_buff *skb)
7847 {
7848         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7849         struct sk_buff *segs, *seg, *next;
7850
7851         /* Estimate the number of fragments in the worst case */
7852         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7853                 netif_tx_stop_queue(txq);
7854
7855                 /* netif_tx_stop_queue() must be done before checking
7856                  * tx index in tg3_tx_avail() below, because in
7857                  * tg3_tx(), we update tx index before checking for
7858                  * netif_tx_queue_stopped().
7859                  */
7860                 smp_mb();
7861                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7862                         return NETDEV_TX_BUSY;
7863
7864                 netif_tx_wake_queue(txq);
7865         }
7866
7867         segs = skb_gso_segment(skb, tp->dev->features &
7868                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7869         if (IS_ERR(segs) || !segs)
7870                 goto tg3_tso_bug_end;
7871
7872         skb_list_walk_safe(segs, seg, next) {
7873                 skb_mark_not_on_list(seg);
7874                 tg3_start_xmit(seg, tp->dev);
7875         }
7876
7877 tg3_tso_bug_end:
7878         dev_consume_skb_any(skb);
7879
7880         return NETDEV_TX_OK;
7881 }
7882
7883 /* hard_start_xmit for all devices */
7884 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7885 {
7886         struct tg3 *tp = netdev_priv(dev);
7887         u32 len, entry, base_flags, mss, vlan = 0;
7888         u32 budget;
7889         int i = -1, would_hit_hwbug;
7890         dma_addr_t mapping;
7891         struct tg3_napi *tnapi;
7892         struct netdev_queue *txq;
7893         unsigned int last;
7894         struct iphdr *iph = NULL;
7895         struct tcphdr *tcph = NULL;
7896         __sum16 tcp_csum = 0, ip_csum = 0;
7897         __be16 ip_tot_len = 0;
7898
7899         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7900         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7901         if (tg3_flag(tp, ENABLE_TSS))
7902                 tnapi++;
7903
7904         budget = tg3_tx_avail(tnapi);
7905
7906         /* We are running in BH disabled context with netif_tx_lock
7907          * and TX reclaim runs via tp->napi.poll inside of a software
7908          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7909          * no IRQ context deadlocks to worry about either.  Rejoice!
7910          */
7911         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7912                 if (!netif_tx_queue_stopped(txq)) {
7913                         netif_tx_stop_queue(txq);
7914
7915                         /* This is a hard error, log it. */
7916                         netdev_err(dev,
7917                                    "BUG! Tx Ring full when queue awake!\n");
7918                 }
7919                 return NETDEV_TX_BUSY;
7920         }
7921
7922         entry = tnapi->tx_prod;
7923         base_flags = 0;
7924
7925         mss = skb_shinfo(skb)->gso_size;
7926         if (mss) {
7927                 u32 tcp_opt_len, hdr_len;
7928
7929                 if (skb_cow_head(skb, 0))
7930                         goto drop;
7931
7932                 iph = ip_hdr(skb);
7933                 tcp_opt_len = tcp_optlen(skb);
7934
7935                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7936
7937                 /* HW/FW can not correctly segment packets that have been
7938                  * vlan encapsulated.
7939                  */
7940                 if (skb->protocol == htons(ETH_P_8021Q) ||
7941                     skb->protocol == htons(ETH_P_8021AD)) {
7942                         if (tg3_tso_bug_gso_check(tnapi, skb))
7943                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7944                         goto drop;
7945                 }
7946
7947                 if (!skb_is_gso_v6(skb)) {
7948                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7949                             tg3_flag(tp, TSO_BUG)) {
7950                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7951                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7952                                 goto drop;
7953                         }
7954                         ip_csum = iph->check;
7955                         ip_tot_len = iph->tot_len;
7956                         iph->check = 0;
7957                         iph->tot_len = htons(mss + hdr_len);
7958                 }
7959
7960                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7961                                TXD_FLAG_CPU_POST_DMA);
7962
7963                 tcph = tcp_hdr(skb);
7964                 tcp_csum = tcph->check;
7965
7966                 if (tg3_flag(tp, HW_TSO_1) ||
7967                     tg3_flag(tp, HW_TSO_2) ||
7968                     tg3_flag(tp, HW_TSO_3)) {
7969                         tcph->check = 0;
7970                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7971                 } else {
7972                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7973                                                          0, IPPROTO_TCP, 0);
7974                 }
7975
7976                 if (tg3_flag(tp, HW_TSO_3)) {
7977                         mss |= (hdr_len & 0xc) << 12;
7978                         if (hdr_len & 0x10)
7979                                 base_flags |= 0x00000010;
7980                         base_flags |= (hdr_len & 0x3e0) << 5;
7981                 } else if (tg3_flag(tp, HW_TSO_2))
7982                         mss |= hdr_len << 9;
7983                 else if (tg3_flag(tp, HW_TSO_1) ||
7984                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7985                         if (tcp_opt_len || iph->ihl > 5) {
7986                                 int tsflags;
7987
7988                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7989                                 mss |= (tsflags << 11);
7990                         }
7991                 } else {
7992                         if (tcp_opt_len || iph->ihl > 5) {
7993                                 int tsflags;
7994
7995                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7996                                 base_flags |= tsflags << 12;
7997                         }
7998                 }
7999         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8000                 /* HW/FW can not correctly checksum packets that have been
8001                  * vlan encapsulated.
8002                  */
8003                 if (skb->protocol == htons(ETH_P_8021Q) ||
8004                     skb->protocol == htons(ETH_P_8021AD)) {
8005                         if (skb_checksum_help(skb))
8006                                 goto drop;
8007                 } else  {
8008                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8009                 }
8010         }
8011
8012         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8013             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8014                 base_flags |= TXD_FLAG_JMB_PKT;
8015
8016         if (skb_vlan_tag_present(skb)) {
8017                 base_flags |= TXD_FLAG_VLAN;
8018                 vlan = skb_vlan_tag_get(skb);
8019         }
8020
8021         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8022             tg3_flag(tp, TX_TSTAMP_EN)) {
8023                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8024                 base_flags |= TXD_FLAG_HWTSTAMP;
8025         }
8026
8027         len = skb_headlen(skb);
8028
8029         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8030                                  DMA_TO_DEVICE);
8031         if (dma_mapping_error(&tp->pdev->dev, mapping))
8032                 goto drop;
8033
8034
8035         tnapi->tx_buffers[entry].skb = skb;
8036         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8037
8038         would_hit_hwbug = 0;
8039
8040         if (tg3_flag(tp, 5701_DMA_BUG))
8041                 would_hit_hwbug = 1;
8042
8043         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8044                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8045                             mss, vlan)) {
8046                 would_hit_hwbug = 1;
8047         } else if (skb_shinfo(skb)->nr_frags > 0) {
8048                 u32 tmp_mss = mss;
8049
8050                 if (!tg3_flag(tp, HW_TSO_1) &&
8051                     !tg3_flag(tp, HW_TSO_2) &&
8052                     !tg3_flag(tp, HW_TSO_3))
8053                         tmp_mss = 0;
8054
8055                 /* Now loop through additional data
8056                  * fragments, and queue them.
8057                  */
8058                 last = skb_shinfo(skb)->nr_frags - 1;
8059                 for (i = 0; i <= last; i++) {
8060                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8061
8062                         len = skb_frag_size(frag);
8063                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8064                                                    len, DMA_TO_DEVICE);
8065
8066                         tnapi->tx_buffers[entry].skb = NULL;
8067                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8068                                            mapping);
8069                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8070                                 goto dma_error;
8071
8072                         if (!budget ||
8073                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8074                                             len, base_flags |
8075                                             ((i == last) ? TXD_FLAG_END : 0),
8076                                             tmp_mss, vlan)) {
8077                                 would_hit_hwbug = 1;
8078                                 break;
8079                         }
8080                 }
8081         }
8082
8083         if (would_hit_hwbug) {
8084                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8085
8086                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8087                         /* If it's a TSO packet, do GSO instead of
8088                          * allocating and copying to a large linear SKB
8089                          */
8090                         if (ip_tot_len) {
8091                                 iph->check = ip_csum;
8092                                 iph->tot_len = ip_tot_len;
8093                         }
8094                         tcph->check = tcp_csum;
8095                         return tg3_tso_bug(tp, tnapi, txq, skb);
8096                 }
8097
8098                 /* If the workaround fails due to memory/mapping
8099                  * failure, silently drop this packet.
8100                  */
8101                 entry = tnapi->tx_prod;
8102                 budget = tg3_tx_avail(tnapi);
8103                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8104                                                 base_flags, mss, vlan))
8105                         goto drop_nofree;
8106         }
8107
8108         skb_tx_timestamp(skb);
8109         netdev_tx_sent_queue(txq, skb->len);
8110
8111         /* Sync BD data before updating mailbox */
8112         wmb();
8113
8114         tnapi->tx_prod = entry;
8115         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8116                 netif_tx_stop_queue(txq);
8117
8118                 /* netif_tx_stop_queue() must be done before checking
8119                  * tx index in tg3_tx_avail() below, because in
8120                  * tg3_tx(), we update tx index before checking for
8121                  * netif_tx_queue_stopped().
8122                  */
8123                 smp_mb();
8124                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8125                         netif_tx_wake_queue(txq);
8126         }
8127
8128         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8129                 /* Packets are ready, update Tx producer idx on card. */
8130                 tw32_tx_mbox(tnapi->prodmbox, entry);
8131         }
8132
8133         return NETDEV_TX_OK;
8134
8135 dma_error:
8136         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8137         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8138 drop:
8139         dev_kfree_skb_any(skb);
8140 drop_nofree:
8141         tp->tx_dropped++;
8142         return NETDEV_TX_OK;
8143 }
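
/* Editorial sketch (summary of the lockless stop/wake pattern used in
 * tg3_start_xmit() and tg3_tx(); simplified, not the literal code):
 *
 *	producer (xmit):                 consumer (tx completion):
 *	netif_tx_stop_queue(txq);        advance tx consumer index;
 *	smp_mb();                        smp_mb();
 *	if (avail > thresh)              if (queue stopped && avail > thresh)
 *		netif_tx_wake_queue(txq);        netif_tx_wake_queue(txq);
 *
 * The paired barriers guarantee at least one side observes the other's
 * update, so the queue cannot remain stopped while ring space exists.
 */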
8144
8145 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8146 {
8147         if (enable) {
8148                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8149                                   MAC_MODE_PORT_MODE_MASK);
8150
8151                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8152
8153                 if (!tg3_flag(tp, 5705_PLUS))
8154                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8155
8156                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8157                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8158                 else
8159                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8160         } else {
8161                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8162
8163                 if (tg3_flag(tp, 5705_PLUS) ||
8164                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8165                     tg3_asic_rev(tp) == ASIC_REV_5700)
8166                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8167         }
8168
8169         tw32(MAC_MODE, tp->mac_mode);
8170         udelay(40);
8171 }
8172
8173 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8174 {
8175         u32 val, bmcr, mac_mode, ptest = 0;
8176
8177         tg3_phy_toggle_apd(tp, false);
8178         tg3_phy_toggle_automdix(tp, false);
8179
8180         if (extlpbk && tg3_phy_set_extloopbk(tp))
8181                 return -EIO;
8182
8183         bmcr = BMCR_FULLDPLX;
8184         switch (speed) {
8185         case SPEED_10:
8186                 break;
8187         case SPEED_100:
8188                 bmcr |= BMCR_SPEED100;
8189                 break;
8190         case SPEED_1000:
8191         default:
8192                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8193                         speed = SPEED_100;
8194                         bmcr |= BMCR_SPEED100;
8195                 } else {
8196                         speed = SPEED_1000;
8197                         bmcr |= BMCR_SPEED1000;
8198                 }
8199         }
8200
8201         if (extlpbk) {
8202                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8203                         tg3_readphy(tp, MII_CTRL1000, &val);
8204                         val |= CTL1000_AS_MASTER |
8205                                CTL1000_ENABLE_MASTER;
8206                         tg3_writephy(tp, MII_CTRL1000, val);
8207                 } else {
8208                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8209                                 MII_TG3_FET_PTEST_TRIM_2;
8210                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8211                 }
8212         } else
8213                 bmcr |= BMCR_LOOPBACK;
8214
8215         tg3_writephy(tp, MII_BMCR, bmcr);
8216
8217         /* The write needs to be flushed for the FETs */
8218         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8219                 tg3_readphy(tp, MII_BMCR, &bmcr);
8220
8221         udelay(40);
8222
8223         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8224             tg3_asic_rev(tp) == ASIC_REV_5785) {
8225                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8226                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8227                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8228
8229                 /* The write needs to be flushed for the AC131 */
8230                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8231         }
8232
8233         /* Reset to prevent losing 1st rx packet intermittently */
8234         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8235             tg3_flag(tp, 5780_CLASS)) {
8236                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8237                 udelay(10);
8238                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8239         }
8240
8241         mac_mode = tp->mac_mode &
8242                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8243         if (speed == SPEED_1000)
8244                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8245         else
8246                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8247
8248         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8249                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8250
8251                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8252                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8253                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8254                         mac_mode |= MAC_MODE_LINK_POLARITY;
8255
8256                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8257                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8258         }
8259
8260         tw32(MAC_MODE, mac_mode);
8261         udelay(40);
8262
8263         return 0;
8264 }
8265
8266 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8267 {
8268         struct tg3 *tp = netdev_priv(dev);
8269
8270         if (features & NETIF_F_LOOPBACK) {
8271                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8272                         return;
8273
8274                 spin_lock_bh(&tp->lock);
8275                 tg3_mac_loopback(tp, true);
8276                 netif_carrier_on(tp->dev);
8277                 spin_unlock_bh(&tp->lock);
8278                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8279         } else {
8280                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8281                         return;
8282
8283                 spin_lock_bh(&tp->lock);
8284                 tg3_mac_loopback(tp, false);
8285                 /* Force link status check */
8286                 tg3_setup_phy(tp, true);
8287                 spin_unlock_bh(&tp->lock);
8288                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8289         }
8290 }
8291
8292 static netdev_features_t tg3_fix_features(struct net_device *dev,
8293         netdev_features_t features)
8294 {
8295         struct tg3 *tp = netdev_priv(dev);
8296
8297         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8298                 features &= ~NETIF_F_ALL_TSO;
8299
8300         return features;
8301 }
8302
8303 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8304 {
8305         netdev_features_t changed = dev->features ^ features;
8306
8307         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8308                 tg3_set_loopback(dev, features);
8309
8310         return 0;
8311 }
8312
8313 static void tg3_rx_prodring_free(struct tg3 *tp,
8314                                  struct tg3_rx_prodring_set *tpr)
8315 {
8316         int i;
8317
8318         if (tpr != &tp->napi[0].prodring) {
8319                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8320                      i = (i + 1) & tp->rx_std_ring_mask)
8321                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8322                                         tp->rx_pkt_map_sz);
8323
8324                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8325                         for (i = tpr->rx_jmb_cons_idx;
8326                              i != tpr->rx_jmb_prod_idx;
8327                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8328                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8329                                                 TG3_RX_JMB_MAP_SZ);
8330                         }
8331                 }
8332
8333                 return;
8334         }
8335
8336         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8337                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8338                                 tp->rx_pkt_map_sz);
8339
8340         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8341                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8342                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8343                                         TG3_RX_JMB_MAP_SZ);
8344         }
8345 }
8346
8347 /* Initialize rx rings for packet processing.
8348  *
8349  * The chip has been shut down and the driver detached from
8350  * the networking layer, so no interrupts or new tx packets will
8351  * end up in the driver.  tp->{tx,}lock are held and thus
8352  * we may not sleep.
8353  */
8354 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8355                                  struct tg3_rx_prodring_set *tpr)
8356 {
8357         u32 i, rx_pkt_dma_sz;
8358
8359         tpr->rx_std_cons_idx = 0;
8360         tpr->rx_std_prod_idx = 0;
8361         tpr->rx_jmb_cons_idx = 0;
8362         tpr->rx_jmb_prod_idx = 0;
8363
8364         if (tpr != &tp->napi[0].prodring) {
8365                 memset(&tpr->rx_std_buffers[0], 0,
8366                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8367                 if (tpr->rx_jmb_buffers)
8368                         memset(&tpr->rx_jmb_buffers[0], 0,
8369                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8370                 goto done;
8371         }
8372
8373         /* Zero out all descriptors. */
8374         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8375
8376         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8377         if (tg3_flag(tp, 5780_CLASS) &&
8378             tp->dev->mtu > ETH_DATA_LEN)
8379                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8380         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8381
8382         /* Initialize invariants of the rings, we only set this
8383          * stuff once.  This works because the card does not
8384          * write into the rx buffer posting rings.
8385          */
8386         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8387                 struct tg3_rx_buffer_desc *rxd;
8388
8389                 rxd = &tpr->rx_std[i];
8390                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8391                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8392                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8393                                (i << RXD_OPAQUE_INDEX_SHIFT));
8394         }
8395
8396         /* Now allocate fresh SKBs for each rx ring. */
8397         for (i = 0; i < tp->rx_pending; i++) {
8398                 unsigned int frag_size;
8399
8400                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8401                                       &frag_size) < 0) {
8402                         netdev_warn(tp->dev,
8403                                     "Using a smaller RX standard ring. Only "
8404                                     "%d out of %d buffers were allocated "
8405                                     "successfully\n", i, tp->rx_pending);
8406                         if (i == 0)
8407                                 goto initfail;
8408                         tp->rx_pending = i;
8409                         break;
8410                 }
8411         }
8412
8413         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8414                 goto done;
8415
8416         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8417
8418         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8419                 goto done;
8420
8421         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8422                 struct tg3_rx_buffer_desc *rxd;
8423
8424                 rxd = &tpr->rx_jmb[i].std;
8425                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8426                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8427                                   RXD_FLAG_JUMBO;
8428                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8429                        (i << RXD_OPAQUE_INDEX_SHIFT));
8430         }
8431
8432         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8433                 unsigned int frag_size;
8434
8435                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8436                                       &frag_size) < 0) {
8437                         netdev_warn(tp->dev,
8438                                     "Using a smaller RX jumbo ring. Only %d "
8439                                     "out of %d buffers were allocated "
8440                                     "successfully\n", i, tp->rx_jumbo_pending);
8441                         if (i == 0)
8442                                 goto initfail;
8443                         tp->rx_jumbo_pending = i;
8444                         break;
8445                 }
8446         }
8447
8448 done:
8449         return 0;
8450
8451 initfail:
8452         tg3_rx_prodring_free(tp, tpr);
8453         return -ENOMEM;
8454 }
8455
8456 static void tg3_rx_prodring_fini(struct tg3 *tp,
8457                                  struct tg3_rx_prodring_set *tpr)
8458 {
8459         kfree(tpr->rx_std_buffers);
8460         tpr->rx_std_buffers = NULL;
8461         kfree(tpr->rx_jmb_buffers);
8462         tpr->rx_jmb_buffers = NULL;
8463         if (tpr->rx_std) {
8464                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8465                                   tpr->rx_std, tpr->rx_std_mapping);
8466                 tpr->rx_std = NULL;
8467         }
8468         if (tpr->rx_jmb) {
8469                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8470                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8471                 tpr->rx_jmb = NULL;
8472         }
8473 }
8474
8475 static int tg3_rx_prodring_init(struct tg3 *tp,
8476                                 struct tg3_rx_prodring_set *tpr)
8477 {
8478         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8479                                       GFP_KERNEL);
8480         if (!tpr->rx_std_buffers)
8481                 return -ENOMEM;
8482
8483         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8484                                          TG3_RX_STD_RING_BYTES(tp),
8485                                          &tpr->rx_std_mapping,
8486                                          GFP_KERNEL);
8487         if (!tpr->rx_std)
8488                 goto err_out;
8489
8490         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8491                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8492                                               GFP_KERNEL);
8493                 if (!tpr->rx_jmb_buffers)
8494                         goto err_out;
8495
8496                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8497                                                  TG3_RX_JMB_RING_BYTES(tp),
8498                                                  &tpr->rx_jmb_mapping,
8499                                                  GFP_KERNEL);
8500                 if (!tpr->rx_jmb)
8501                         goto err_out;
8502         }
8503
8504         return 0;
8505
8506 err_out:
8507         tg3_rx_prodring_fini(tp, tpr);
8508         return -ENOMEM;
8509 }
8510
8511 /* Free up pending packets in all rx/tx rings.
8512  *
8513  * The chip has been shut down and the driver detached from
8514  * the networking stack, so no interrupts or new tx packets will
8515  * end up in the driver.  tp->{tx,}lock is not held and we are not
8516  * in an interrupt context and thus may sleep.
8517  */
8518 static void tg3_free_rings(struct tg3 *tp)
8519 {
8520         int i, j;
8521
8522         for (j = 0; j < tp->irq_cnt; j++) {
8523                 struct tg3_napi *tnapi = &tp->napi[j];
8524
8525                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8526
8527                 if (!tnapi->tx_buffers)
8528                         continue;
8529
8530                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8531                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8532
8533                         if (!skb)
8534                                 continue;
8535
8536                         tg3_tx_skb_unmap(tnapi, i,
8537                                          skb_shinfo(skb)->nr_frags - 1);
8538
8539                         dev_consume_skb_any(skb);
8540                 }
8541                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8542         }
8543 }
8544
8545 /* Initialize tx/rx rings for packet processing.
8546  *
8547  * The chip has been shut down and the driver detached from
8548  * the networking stack, so no interrupts or new tx packets will
8549  * end up in the driver.  tp->{tx,}lock are held and thus
8550  * we may not sleep.
8551  */
8552 static int tg3_init_rings(struct tg3 *tp)
8553 {
8554         int i;
8555
8556         /* Free up all the SKBs. */
8557         tg3_free_rings(tp);
8558
8559         for (i = 0; i < tp->irq_cnt; i++) {
8560                 struct tg3_napi *tnapi = &tp->napi[i];
8561
8562                 tnapi->last_tag = 0;
8563                 tnapi->last_irq_tag = 0;
8564                 tnapi->hw_status->status = 0;
8565                 tnapi->hw_status->status_tag = 0;
8566                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8567
8568                 tnapi->tx_prod = 0;
8569                 tnapi->tx_cons = 0;
8570                 if (tnapi->tx_ring)
8571                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8572
8573                 tnapi->rx_rcb_ptr = 0;
8574                 if (tnapi->rx_rcb)
8575                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8576
8577                 if (tnapi->prodring.rx_std &&
8578                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8579                         tg3_free_rings(tp);
8580                         return -ENOMEM;
8581                 }
8582         }
8583
8584         return 0;
8585 }
8586
8587 static void tg3_mem_tx_release(struct tg3 *tp)
8588 {
8589         int i;
8590
8591         for (i = 0; i < tp->irq_max; i++) {
8592                 struct tg3_napi *tnapi = &tp->napi[i];
8593
8594                 if (tnapi->tx_ring) {
8595                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8596                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8597                         tnapi->tx_ring = NULL;
8598                 }
8599
8600                 kfree(tnapi->tx_buffers);
8601                 tnapi->tx_buffers = NULL;
8602         }
8603 }
8604
8605 static int tg3_mem_tx_acquire(struct tg3 *tp)
8606 {
8607         int i;
8608         struct tg3_napi *tnapi = &tp->napi[0];
8609
8610         /* If multivector TSS is enabled, vector 0 does not handle
8611          * tx interrupts.  Don't allocate any resources for it.
8612          */
8613         if (tg3_flag(tp, ENABLE_TSS))
8614                 tnapi++;
8615
8616         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8617                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8618                                             sizeof(struct tg3_tx_ring_info),
8619                                             GFP_KERNEL);
8620                 if (!tnapi->tx_buffers)
8621                         goto err_out;
8622
8623                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8624                                                     TG3_TX_RING_BYTES,
8625                                                     &tnapi->tx_desc_mapping,
8626                                                     GFP_KERNEL);
8627                 if (!tnapi->tx_ring)
8628                         goto err_out;
8629         }
8630
8631         return 0;
8632
8633 err_out:
8634         tg3_mem_tx_release(tp);
8635         return -ENOMEM;
8636 }
8637
8638 static void tg3_mem_rx_release(struct tg3 *tp)
8639 {
8640         int i;
8641
8642         for (i = 0; i < tp->irq_max; i++) {
8643                 struct tg3_napi *tnapi = &tp->napi[i];
8644
8645                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8646
8647                 if (!tnapi->rx_rcb)
8648                         continue;
8649
8650                 dma_free_coherent(&tp->pdev->dev,
8651                                   TG3_RX_RCB_RING_BYTES(tp),
8652                                   tnapi->rx_rcb,
8653                                   tnapi->rx_rcb_mapping);
8654                 tnapi->rx_rcb = NULL;
8655         }
8656 }
8657
8658 static int tg3_mem_rx_acquire(struct tg3 *tp)
8659 {
8660         unsigned int i, limit;
8661
8662         limit = tp->rxq_cnt;
8663
8664         /* If RSS is enabled, we need a (dummy) producer ring
8665          * set on vector zero.  This is the true hw prodring.
8666          */
8667         if (tg3_flag(tp, ENABLE_RSS))
8668                 limit++;
8669
8670         for (i = 0; i < limit; i++) {
8671                 struct tg3_napi *tnapi = &tp->napi[i];
8672
8673                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8674                         goto err_out;
8675
8676                 /* If multivector RSS is enabled, vector 0
8677                  * does not handle rx or tx interrupts.
8678                  * Don't allocate any resources for it.
8679                  */
8680                 if (!i && tg3_flag(tp, ENABLE_RSS))
8681                         continue;
8682
8683                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8684                                                    TG3_RX_RCB_RING_BYTES(tp),
8685                                                    &tnapi->rx_rcb_mapping,
8686                                                    GFP_KERNEL);
8687                 if (!tnapi->rx_rcb)
8688                         goto err_out;
8689         }
8690
8691         return 0;
8692
8693 err_out:
8694         tg3_mem_rx_release(tp);
8695         return -ENOMEM;
8696 }
8697
8698 /*
8699  * Must not be invoked with interrupt sources disabled and
8700  * the hardware shut down.
8701  */
8702 static void tg3_free_consistent(struct tg3 *tp)
8703 {
8704         int i;
8705
8706         for (i = 0; i < tp->irq_cnt; i++) {
8707                 struct tg3_napi *tnapi = &tp->napi[i];
8708
8709                 if (tnapi->hw_status) {
8710                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8711                                           tnapi->hw_status,
8712                                           tnapi->status_mapping);
8713                         tnapi->hw_status = NULL;
8714                 }
8715         }
8716
8717         tg3_mem_rx_release(tp);
8718         tg3_mem_tx_release(tp);
8719
8720         /* tp->hw_stats can be referenced safely:
8721          *     1. under rtnl_lock
8722          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8723          */
8724         if (tp->hw_stats) {
8725                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8726                                   tp->hw_stats, tp->stats_mapping);
8727                 tp->hw_stats = NULL;
8728         }
8729 }
8730
8731 /*
8732  * Must not be invoked with interrupt sources disabled and
8733  * the hardware shut down.  Can sleep.
8734  */
8735 static int tg3_alloc_consistent(struct tg3 *tp)
8736 {
8737         int i;
8738
8739         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8740                                           sizeof(struct tg3_hw_stats),
8741                                           &tp->stats_mapping, GFP_KERNEL);
8742         if (!tp->hw_stats)
8743                 goto err_out;
8744
8745         for (i = 0; i < tp->irq_cnt; i++) {
8746                 struct tg3_napi *tnapi = &tp->napi[i];
8747                 struct tg3_hw_status *sblk;
8748
8749                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8750                                                       TG3_HW_STATUS_SIZE,
8751                                                       &tnapi->status_mapping,
8752                                                       GFP_KERNEL);
8753                 if (!tnapi->hw_status)
8754                         goto err_out;
8755
8756                 sblk = tnapi->hw_status;
8757
8758                 if (tg3_flag(tp, ENABLE_RSS)) {
8759                         u16 *prodptr = NULL;
8760
8761                         /*
8762                          * When RSS is enabled, the status block format changes
8763                          * slightly.  The "rx_jumbo_consumer", "reserved",
8764                          * and "rx_mini_consumer" members get mapped to the
8765                          * other three rx return ring producer indexes.
8766                          */
8767                         switch (i) {
8768                         case 1:
8769                                 prodptr = &sblk->idx[0].rx_producer;
8770                                 break;
8771                         case 2:
8772                                 prodptr = &sblk->rx_jumbo_consumer;
8773                                 break;
8774                         case 3:
8775                                 prodptr = &sblk->reserved;
8776                                 break;
8777                         case 4:
8778                                 prodptr = &sblk->rx_mini_consumer;
8779                                 break;
8780                         }
8781                         tnapi->rx_rcb_prod_idx = prodptr;
8782                 } else {
8783                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8784                 }
8785         }
8786
8787         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8788                 goto err_out;
8789
8790         return 0;
8791
8792 err_out:
8793         tg3_free_consistent(tp);
8794         return -ENOMEM;
8795 }
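/* For reference, the mapping established by the switch above: with RSS
 * enabled, each vector's RX return-ring producer index is read through
 * a repurposed status block field:
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 */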
8796
8797 #define MAX_WAIT_CNT 1000
8798
8799 /* To stop a block, clear the enable bit and poll till it
8800  * clears.  tp->lock is held.
8801  */
8802 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8803 {
8804         unsigned int i;
8805         u32 val;
8806
8807         if (tg3_flag(tp, 5705_PLUS)) {
8808                 switch (ofs) {
8809                 case RCVLSC_MODE:
8810                 case DMAC_MODE:
8811                 case MBFREE_MODE:
8812                 case BUFMGR_MODE:
8813                 case MEMARB_MODE:
8814                         /* We can't enable/disable these bits on the
8815                          * 5705/5750, so just report success.
8816                          */
8817                         return 0;
8818
8819                 default:
8820                         break;
8821                 }
8822         }
8823
8824         val = tr32(ofs);
8825         val &= ~enable_bit;
8826         tw32_f(ofs, val);
8827
8828         for (i = 0; i < MAX_WAIT_CNT; i++) {
8829                 if (pci_channel_offline(tp->pdev)) {
8830                         dev_err(&tp->pdev->dev,
8831                                 "tg3_stop_block device offline, "
8832                                 "ofs=%lx enable_bit=%x\n",
8833                                 ofs, enable_bit);
8834                         return -ENODEV;
8835                 }
8836
8837                 udelay(100);
8838                 val = tr32(ofs);
8839                 if ((val & enable_bit) == 0)
8840                         break;
8841         }
8842
8843         if (i == MAX_WAIT_CNT && !silent) {
8844                 dev_err(&tp->pdev->dev,
8845                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8846                         ofs, enable_bit);
8847                 return -ENODEV;
8848         }
8849
8850         return 0;
8851 }
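/* Typical usage, mirroring the calls in tg3_abort_hw() below: clear an
 * engine's enable bit and wait for it to quiesce, e.g.
 *
 *	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 *
 * A zero return means the enable bit cleared within
 * MAX_WAIT_CNT * 100 usec.
 */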
8852
8853 /* tp->lock is held. */
8854 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8855 {
8856         int i, err;
8857
8858         tg3_disable_ints(tp);
8859
8860         if (pci_channel_offline(tp->pdev)) {
8861                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8862                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8863                 err = -ENODEV;
8864                 goto err_no_dev;
8865         }
8866
8867         tp->rx_mode &= ~RX_MODE_ENABLE;
8868         tw32_f(MAC_RX_MODE, tp->rx_mode);
8869         udelay(10);
8870
8871         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8872         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8873         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8874         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8875         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8876         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8877
8878         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8882         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8883         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8885
8886         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8887         tw32_f(MAC_MODE, tp->mac_mode);
8888         udelay(40);
8889
8890         tp->tx_mode &= ~TX_MODE_ENABLE;
8891         tw32_f(MAC_TX_MODE, tp->tx_mode);
8892
8893         for (i = 0; i < MAX_WAIT_CNT; i++) {
8894                 udelay(100);
8895                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8896                         break;
8897         }
8898         if (i >= MAX_WAIT_CNT) {
8899                 dev_err(&tp->pdev->dev,
8900                         "%s timed out, TX_MODE_ENABLE will not clear "
8901                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8902                 err |= -ENODEV;
8903         }
8904
8905         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8907         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8908
8909         tw32(FTQ_RESET, 0xffffffff);
8910         tw32(FTQ_RESET, 0x00000000);
8911
8912         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8913         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8914
8915 err_no_dev:
8916         for (i = 0; i < tp->irq_cnt; i++) {
8917                 struct tg3_napi *tnapi = &tp->napi[i];
8918                 if (tnapi->hw_status)
8919                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8920         }
8921
8922         return err;
8923 }
8924
8925 /* Save PCI command register before chip reset */
8926 static void tg3_save_pci_state(struct tg3 *tp)
8927 {
8928         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8929 }
8930
8931 /* Restore PCI state after chip reset */
8932 static void tg3_restore_pci_state(struct tg3 *tp)
8933 {
8934         u32 val;
8935
8936         /* Re-enable indirect register accesses. */
8937         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8938                                tp->misc_host_ctrl);
8939
8940         /* Set MAX PCI retry to zero. */
8941         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8942         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8943             tg3_flag(tp, PCIX_MODE))
8944                 val |= PCISTATE_RETRY_SAME_DMA;
8945         /* Allow reads and writes to the APE register and memory space. */
8946         if (tg3_flag(tp, ENABLE_APE))
8947                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8948                        PCISTATE_ALLOW_APE_SHMEM_WR |
8949                        PCISTATE_ALLOW_APE_PSPACE_WR;
8950         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8951
8952         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8953
8954         if (!tg3_flag(tp, PCI_EXPRESS)) {
8955                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8956                                       tp->pci_cacheline_sz);
8957                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8958                                       tp->pci_lat_timer);
8959         }
8960
8961         /* Make sure PCI-X relaxed ordering bit is clear. */
8962         if (tg3_flag(tp, PCIX_MODE)) {
8963                 u16 pcix_cmd;
8964
8965                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8966                                      &pcix_cmd);
8967                 pcix_cmd &= ~PCI_X_CMD_ERO;
8968                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8969                                       pcix_cmd);
8970         }
8971
8972         if (tg3_flag(tp, 5780_CLASS)) {
8973
8974                 /* Chip reset on 5780 will reset the MSI enable bit,
8975                  * so we need to restore it.
8976                  */
8977                 if (tg3_flag(tp, USING_MSI)) {
8978                         u16 ctrl;
8979
8980                         pci_read_config_word(tp->pdev,
8981                                              tp->msi_cap + PCI_MSI_FLAGS,
8982                                              &ctrl);
8983                         pci_write_config_word(tp->pdev,
8984                                               tp->msi_cap + PCI_MSI_FLAGS,
8985                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8986                         val = tr32(MSGINT_MODE);
8987                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8988                 }
8989         }
8990 }
8991
8992 static void tg3_override_clk(struct tg3 *tp)
8993 {
8994         u32 val;
8995
8996         switch (tg3_asic_rev(tp)) {
8997         case ASIC_REV_5717:
8998                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8999                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9000                      TG3_CPMU_MAC_ORIDE_ENABLE);
9001                 break;
9002
9003         case ASIC_REV_5719:
9004         case ASIC_REV_5720:
9005                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9006                 break;
9007
9008         default:
9009                 return;
9010         }
9011 }
9012
9013 static void tg3_restore_clk(struct tg3 *tp)
9014 {
9015         u32 val;
9016
9017         switch (tg3_asic_rev(tp)) {
9018         case ASIC_REV_5717:
9019                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9020                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9021                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9022                 break;
9023
9024         case ASIC_REV_5719:
9025         case ASIC_REV_5720:
9026                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9027                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9028                 break;
9029
9030         default:
9031                 return;
9032         }
9033 }
9034
9035 /* tp->lock is held. */
9036 static int tg3_chip_reset(struct tg3 *tp)
9037         __releases(tp->lock)
9038         __acquires(tp->lock)
9039 {
9040         u32 val;
9041         void (*write_op)(struct tg3 *, u32, u32);
9042         int i, err;
9043
9044         if (!pci_device_is_present(tp->pdev))
9045                 return -ENODEV;
9046
9047         tg3_nvram_lock(tp);
9048
9049         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9050
9051         /* No matching tg3_nvram_unlock() after this because
9052          * the chip reset below will undo the nvram lock.
9053          */
9054         tp->nvram_lock_cnt = 0;
9055
9056         /* GRC_MISC_CFG core clock reset will clear the memory
9057          * enable bit in PCI register 4 and the MSI enable bit
9058          * on some chips, so we save relevant registers here.
9059          */
9060         tg3_save_pci_state(tp);
9061
9062         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9063             tg3_flag(tp, 5755_PLUS))
9064                 tw32(GRC_FASTBOOT_PC, 0);
9065
9066         /*
9067          * We must avoid the readl() that normally takes place.
9068          * It locks up machines, causes machine checks, and does
9069          * other fun things.  So, temporarily disable the 5701
9070          * hardware workaround, while we do the reset.
9071          */
9072         write_op = tp->write32;
9073         if (write_op == tg3_write_flush_reg32)
9074                 tp->write32 = tg3_write32;
9075
9076         /* Prevent the irq handler from reading or writing PCI registers
9077          * during chip reset when the memory enable bit in the PCI command
9078          * register may be cleared.  The chip does not generate interrupt
9079          * register may be cleared.  The chip does not generate interrupts
9080          * sharing or irqpoll.
9081          */
9082         tg3_flag_set(tp, CHIP_RESETTING);
9083         for (i = 0; i < tp->irq_cnt; i++) {
9084                 struct tg3_napi *tnapi = &tp->napi[i];
9085                 if (tnapi->hw_status) {
9086                         tnapi->hw_status->status = 0;
9087                         tnapi->hw_status->status_tag = 0;
9088                 }
9089                 tnapi->last_tag = 0;
9090                 tnapi->last_irq_tag = 0;
9091         }
9092         smp_mb();
9093
9094         tg3_full_unlock(tp);
9095
9096         for (i = 0; i < tp->irq_cnt; i++)
9097                 synchronize_irq(tp->napi[i].irq_vec);
9098
9099         tg3_full_lock(tp, 0);
9100
9101         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9102                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9103                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9104         }
9105
9106         /* do the reset */
9107         val = GRC_MISC_CFG_CORECLK_RESET;
9108
9109         if (tg3_flag(tp, PCI_EXPRESS)) {
9110                 /* Force PCIe 1.0a mode */
9111                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9112                     !tg3_flag(tp, 57765_PLUS) &&
9113                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9114                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9115                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9116
9117                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9118                         tw32(GRC_MISC_CFG, (1 << 29));
9119                         val |= (1 << 29);
9120                 }
9121         }
9122
9123         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9124                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9125                 tw32(GRC_VCPU_EXT_CTRL,
9126                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9127         }
9128
9129         /* Set the clock to the highest frequency to avoid timeouts. With link
9130          * aware mode, the clock speed could be slow and the bootcode may not
9131          * complete within the expected time. Override the clock to allow the
9132          * bootcode to finish sooner and then restore it.
9133          */
9134         tg3_override_clk(tp);
9135
9136         /* Manage gphy power for all PCIe devices lacking a CPMU. */
9137         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9138                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9139
9140         tw32(GRC_MISC_CFG, val);
9141
9142         /* restore 5701 hardware bug workaround write method */
9143         tp->write32 = write_op;
9144
9145         /* Unfortunately, we have to delay before the PCI read back.
9146          * Some 575X chips will not even respond to a PCI cfg access
9147          * when the reset command is given to the chip.
9148          *
9149          * How do these hardware designers expect things to work
9150          * properly if the PCI write is posted for a long period
9151          * of time?  It is always necessary to have some method by
9152          * which a register read back can occur to push the write
9153          * out which does the reset.
9154          *
9155          * For most tg3 variants the trick below was working.
9156          * Ho hum...
9157          */
9158         udelay(120);
9159
9160         /* Flush PCI posted writes.  The normal MMIO registers
9161          * are inaccessible at this time, so this is the only
9162          * way to do this reliably (actually, this is no longer
9163          * the case, see above).  I tried to use indirect
9164          * register read/write but this upset some 5701 variants.
9165          */
9166         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9167
9168         udelay(120);
9169
9170         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9171                 u16 val16;
9172
9173                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9174                         int j;
9175                         u32 cfg_val;
9176
9177                         /* Wait for link training to complete.  */
9178                         for (j = 0; j < 5000; j++)
9179                                 udelay(100);
9180
9181                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9182                         pci_write_config_dword(tp->pdev, 0xc4,
9183                                                cfg_val | (1 << 15));
9184                 }
9185
9186                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9187                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9188                 /*
9189                  * Older PCIe devices only support the 128 byte
9190                  * MPS setting.  Enforce the restriction.
9191                  */
9192                 if (!tg3_flag(tp, CPMU_PRESENT))
9193                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9194                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9195
9196                 /* Clear error status */
9197                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9198                                       PCI_EXP_DEVSTA_CED |
9199                                       PCI_EXP_DEVSTA_NFED |
9200                                       PCI_EXP_DEVSTA_FED |
9201                                       PCI_EXP_DEVSTA_URD);
9202         }
9203
9204         tg3_restore_pci_state(tp);
9205
9206         tg3_flag_clear(tp, CHIP_RESETTING);
9207         tg3_flag_clear(tp, ERROR_PROCESSED);
9208
9209         val = 0;
9210         if (tg3_flag(tp, 5780_CLASS))
9211                 val = tr32(MEMARB_MODE);
9212         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9213
9214         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9215                 tg3_stop_fw(tp);
9216                 tw32(0x5000, 0x400);
9217         }
9218
9219         if (tg3_flag(tp, IS_SSB_CORE)) {
9220                 /*
9221                  * BCM4785: In order to avoid repercussions from using
9222                  * potentially defective internal ROM, stop the Rx RISC CPU,
9223                  * which is not required for normal operation.
9224                  */
9225                 tg3_stop_fw(tp);
9226                 tg3_halt_cpu(tp, RX_CPU_BASE);
9227         }
9228
9229         err = tg3_poll_fw(tp);
9230         if (err)
9231                 return err;
9232
9233         tw32(GRC_MODE, tp->grc_mode);
9234
9235         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9236                 val = tr32(0xc4);
9237
9238                 tw32(0xc4, val | (1 << 15));
9239         }
9240
9241         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9242             tg3_asic_rev(tp) == ASIC_REV_5705) {
9243                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9244                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9245                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9246                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9247         }
9248
9249         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9250                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9251                 val = tp->mac_mode;
9252         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9253                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9254                 val = tp->mac_mode;
9255         } else
9256                 val = 0;
9257
9258         tw32_f(MAC_MODE, val);
9259         udelay(40);
9260
9261         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9262
9263         tg3_mdio_start(tp);
9264
9265         if (tg3_flag(tp, PCI_EXPRESS) &&
9266             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9267             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9268             !tg3_flag(tp, 57765_PLUS)) {
9269                 val = tr32(0x7c00);
9270
9271                 tw32(0x7c00, val | (1 << 25));
9272         }
9273
9274         tg3_restore_clk(tp);
9275
9276         /* Increase the core clock speed to fix a tx timeout issue on 5762
9277          * with 100Mbps link speed.
9278          */
9279         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9280                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9281                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9282                      TG3_CPMU_MAC_ORIDE_ENABLE);
9283         }
9284
9285         /* Reprobe ASF enable state.  */
9286         tg3_flag_clear(tp, ENABLE_ASF);
9287         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9288                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9289
9290         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9291         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9292         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9293                 u32 nic_cfg;
9294
9295                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9296                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9297                         tg3_flag_set(tp, ENABLE_ASF);
9298                         tp->last_event_jiffies = jiffies;
9299                         if (tg3_flag(tp, 5750_PLUS))
9300                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9301
9302                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9303                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9304                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9305                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9306                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9307                 }
9308         }
9309
9310         return 0;
9311 }
9312
9313 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9314 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9315 static void __tg3_set_rx_mode(struct net_device *);
9316
9317 /* tp->lock is held. */
9318 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9319 {
9320         int err;
9321
9322         tg3_stop_fw(tp);
9323
9324         tg3_write_sig_pre_reset(tp, kind);
9325
9326         tg3_abort_hw(tp, silent);
9327         err = tg3_chip_reset(tp);
9328
9329         __tg3_set_mac_addr(tp, false);
9330
9331         tg3_write_sig_legacy(tp, kind);
9332         tg3_write_sig_post_reset(tp, kind);
9333
9334         if (tp->hw_stats) {
9335                 /* Save the stats across chip resets... */
9336                 tg3_get_nstats(tp, &tp->net_stats_prev);
9337                 tg3_get_estats(tp, &tp->estats_prev);
9338
9339                 /* And make sure the next sample is new data */
9340                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9341         }
9342
9343         return err;
9344 }
9345
9346 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9347 {
9348         struct tg3 *tp = netdev_priv(dev);
9349         struct sockaddr *addr = p;
9350         int err = 0;
9351         bool skip_mac_1 = false;
9352
9353         if (!is_valid_ether_addr(addr->sa_data))
9354                 return -EADDRNOTAVAIL;
9355
9356         eth_hw_addr_set(dev, addr->sa_data);
9357
9358         if (!netif_running(dev))
9359                 return 0;
9360
9361         if (tg3_flag(tp, ENABLE_ASF)) {
9362                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9363
9364                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9365                 addr0_low = tr32(MAC_ADDR_0_LOW);
9366                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9367                 addr1_low = tr32(MAC_ADDR_1_LOW);
9368
9369                 /* Skip MAC addr 1 if ASF is using it. */
9370                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9371                     !(addr1_high == 0 && addr1_low == 0))
9372                         skip_mac_1 = true;
9373         }
9374         spin_lock_bh(&tp->lock);
9375         __tg3_set_mac_addr(tp, skip_mac_1);
9376         __tg3_set_rx_mode(dev);
9377         spin_unlock_bh(&tp->lock);
9378
9379         return err;
9380 }
9381
9382 /* tp->lock is held. */
9383 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9384                            dma_addr_t mapping, u32 maxlen_flags,
9385                            u32 nic_addr)
9386 {
9387         tg3_write_mem(tp,
9388                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9389                       ((u64) mapping >> 32));
9390         tg3_write_mem(tp,
9391                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9392                       ((u64) mapping & 0xffffffff));
9393         tg3_write_mem(tp,
9394                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9395                        maxlen_flags);
9396
9397         if (!tg3_flag(tp, 5705_PLUS))
9398                 tg3_write_mem(tp,
9399                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9400                               nic_addr);
9401 }
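/* Address-split sketch (illustrative value only): a 64-bit DMA address
 * such as 0x0000000123456000 is written as two 32-bit halves, 0x00000001
 * at the TG3_64BIT_REG_HIGH offset and 0x23456000 at TG3_64BIT_REG_LOW.
 */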
9402
9403
9404 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9405 {
9406         int i = 0;
9407
9408         if (!tg3_flag(tp, ENABLE_TSS)) {
9409                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9410                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9411                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9412         } else {
9413                 tw32(HOSTCC_TXCOL_TICKS, 0);
9414                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9415                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9416
9417                 for (; i < tp->txq_cnt; i++) {
9418                         u32 reg;
9419
9420                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9421                         tw32(reg, ec->tx_coalesce_usecs);
9422                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9423                         tw32(reg, ec->tx_max_coalesced_frames);
9424                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9425                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9426                 }
9427         }
9428
9429         for (; i < tp->irq_max - 1; i++) {
9430                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9431                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9432                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9433         }
9434 }
9435
9436 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9437 {
9438         int i = 0;
9439         u32 limit = tp->rxq_cnt;
9440
9441         if (!tg3_flag(tp, ENABLE_RSS)) {
9442                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9443                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9444                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9445                 limit--;
9446         } else {
9447                 tw32(HOSTCC_RXCOL_TICKS, 0);
9448                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9449                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9450         }
9451
9452         for (; i < limit; i++) {
9453                 u32 reg;
9454
9455                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9456                 tw32(reg, ec->rx_coalesce_usecs);
9457                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9458                 tw32(reg, ec->rx_max_coalesced_frames);
9459                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9460                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9461         }
9462
9463         for (; i < tp->irq_max - 1; i++) {
9464                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9465                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9466                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9467         }
9468 }
9469
9470 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9471 {
9472         tg3_coal_tx_init(tp, ec);
9473         tg3_coal_rx_init(tp, ec);
9474
9475         if (!tg3_flag(tp, 5705_PLUS)) {
9476                 u32 val = ec->stats_block_coalesce_usecs;
9477
9478                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9479                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9480
9481                 if (!tp->link_up)
9482                         val = 0;
9483
9484                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9485         }
9486 }
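/* Context sketch (an assumption about the usual call path, not taken
 * from this function): the ethtool_coalesce values programmed above
 * normally originate from userspace, e.g.
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * which arrives here as ec->rx_coalesce_usecs = 20 and
 * ec->rx_max_coalesced_frames = 5.
 */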
9487
9488 /* tp->lock is held. */
9489 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9490 {
9491         u32 txrcb, limit;
9492
9493         /* Disable all transmit rings but the first. */
9494         if (!tg3_flag(tp, 5705_PLUS))
9495                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9496         else if (tg3_flag(tp, 5717_PLUS))
9497                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9498         else if (tg3_flag(tp, 57765_CLASS) ||
9499                  tg3_asic_rev(tp) == ASIC_REV_5762)
9500                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9501         else
9502                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9503
9504         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9505              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9506                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9507                               BDINFO_FLAGS_DISABLED);
9508 }
9509
9510 /* tp->lock is held. */
9511 static void tg3_tx_rcbs_init(struct tg3 *tp)
9512 {
9513         int i = 0;
9514         u32 txrcb = NIC_SRAM_SEND_RCB;
9515
9516         if (tg3_flag(tp, ENABLE_TSS))
9517                 i++;
9518
9519         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9520                 struct tg3_napi *tnapi = &tp->napi[i];
9521
9522                 if (!tnapi->tx_ring)
9523                         continue;
9524
9525                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9526                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9527                                NIC_SRAM_TX_BUFFER_DESC);
9528         }
9529 }
9530
9531 /* tp->lock is held. */
9532 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9533 {
9534         u32 rxrcb, limit;
9535
9536         /* Disable all receive return rings but the first. */
9537         if (tg3_flag(tp, 5717_PLUS))
9538                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9539         else if (!tg3_flag(tp, 5705_PLUS))
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9541         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9542                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9543                  tg3_flag(tp, 57765_CLASS))
9544                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9545         else
9546                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9547
9548         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9549              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9550                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9551                               BDINFO_FLAGS_DISABLED);
9552 }
9553
9554 /* tp->lock is held. */
9555 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9556 {
9557         int i = 0;
9558         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9559
9560         if (tg3_flag(tp, ENABLE_RSS))
9561                 i++;
9562
9563         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9564                 struct tg3_napi *tnapi = &tp->napi[i];
9565
9566                 if (!tnapi->rx_rcb)
9567                         continue;
9568
9569                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9570                                (tp->rx_ret_ring_mask + 1) <<
9571                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9572         }
9573 }
9574
9575 /* tp->lock is held. */
9576 static void tg3_rings_reset(struct tg3 *tp)
9577 {
9578         int i;
9579         u32 stblk;
9580         struct tg3_napi *tnapi = &tp->napi[0];
9581
9582         tg3_tx_rcbs_disable(tp);
9583
9584         tg3_rx_ret_rcbs_disable(tp);
9585
9586         /* Disable interrupts */
9587         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9588         tp->napi[0].chk_msi_cnt = 0;
9589         tp->napi[0].last_rx_cons = 0;
9590         tp->napi[0].last_tx_cons = 0;
9591
9592         /* Zero mailbox registers. */
9593         if (tg3_flag(tp, SUPPORT_MSIX)) {
9594                 for (i = 1; i < tp->irq_max; i++) {
9595                         tp->napi[i].tx_prod = 0;
9596                         tp->napi[i].tx_cons = 0;
9597                         if (tg3_flag(tp, ENABLE_TSS))
9598                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9599                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9600                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9601                         tp->napi[i].chk_msi_cnt = 0;
9602                         tp->napi[i].last_rx_cons = 0;
9603                         tp->napi[i].last_tx_cons = 0;
9604                 }
9605                 if (!tg3_flag(tp, ENABLE_TSS))
9606                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9607         } else {
9608                 tp->napi[0].tx_prod = 0;
9609                 tp->napi[0].tx_cons = 0;
9610                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9611                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9612         }
9613
9614         /* Make sure the NIC-based send BD rings are disabled. */
9615         if (!tg3_flag(tp, 5705_PLUS)) {
9616                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9617                 for (i = 0; i < 16; i++)
9618                         tw32_tx_mbox(mbox + i * 8, 0);
9619         }
9620
9621         /* Clear status block in ram. */
9622         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9623
9624         /* Set status block DMA address */
9625         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9626              ((u64) tnapi->status_mapping >> 32));
9627         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9628              ((u64) tnapi->status_mapping & 0xffffffff));
9629
9630         stblk = HOSTCC_STATBLCK_RING1;
9631
9632         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9633                 u64 mapping = (u64)tnapi->status_mapping;
9634                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9635                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9636                 stblk += 8;
9637
9638                 /* Clear status block in ram. */
9639                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9640         }
9641
9642         tg3_tx_rcbs_init(tp);
9643         tg3_rx_ret_rcbs_init(tp);
9644 }
9645
9646 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9647 {
9648         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9649
9650         if (!tg3_flag(tp, 5750_PLUS) ||
9651             tg3_flag(tp, 5780_CLASS) ||
9652             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9653             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9654             tg3_flag(tp, 57765_PLUS))
9655                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9656         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9657                  tg3_asic_rev(tp) == ASIC_REV_5787)
9658                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9659         else
9660                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9661
9662         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9663         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9664
9665         val = min(nic_rep_thresh, host_rep_thresh);
9666         tw32(RCVBDI_STD_THRESH, val);
9667
9668         if (tg3_flag(tp, 57765_PLUS))
9669                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9670
9671         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9672                 return;
9673
9674         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9675
9676         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9677
9678         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9679         tw32(RCVBDI_JUMBO_THRESH, val);
9680
9681         if (tg3_flag(tp, 57765_PLUS))
9682                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9683 }
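/* Worked example (illustrative, assuming the driver default rx_pending
 * of 200): host_rep_thresh = max_t(u32, 200 / 8, 1) = 25, so
 * RCVBDI_STD_THRESH is written with min(nic_rep_thresh, 25).
 */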
9684
9685 static inline u32 calc_crc(unsigned char *buf, int len)
9686 {
9687         u32 reg;
9688         u32 tmp;
9689         int j, k;
9690
9691         reg = 0xffffffff;
9692
9693         for (j = 0; j < len; j++) {
9694                 reg ^= buf[j];
9695
9696                 for (k = 0; k < 8; k++) {
9697                         tmp = reg & 0x01;
9698
9699                         reg >>= 1;
9700
9701                         if (tmp)
9702                                 reg ^= CRC32_POLY_LE;
9703                 }
9704         }
9705
9706         return ~reg;
9707 }
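/* For reference, the bit-serial loop above should be equivalent to the
 * generic helper from <linux/crc32.h> (an equivalence the reader can
 * verify; it is not asserted by the original source):
 *
 *	return ~crc32_le(~0, buf, len);
 */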
9708
9709 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9710 {
9711         /* accept or reject all multicast frames */
9712         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9713         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9714         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9715         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9716 }
9717
9718 static void __tg3_set_rx_mode(struct net_device *dev)
9719 {
9720         struct tg3 *tp = netdev_priv(dev);
9721         u32 rx_mode;
9722
9723         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9724                                   RX_MODE_KEEP_VLAN_TAG);
9725
9726 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9727         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9728          * flag clear.
9729          */
9730         if (!tg3_flag(tp, ENABLE_ASF))
9731                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9732 #endif
9733
9734         if (dev->flags & IFF_PROMISC) {
9735                 /* Promiscuous mode. */
9736                 rx_mode |= RX_MODE_PROMISC;
9737         } else if (dev->flags & IFF_ALLMULTI) {
9738                 /* Accept all multicast. */
9739                 tg3_set_multi(tp, 1);
9740         } else if (netdev_mc_empty(dev)) {
9741                 /* Reject all multicast. */
9742                 tg3_set_multi(tp, 0);
9743         } else {
9744                 /* Accept one or more multicast addresses. */
9745                 struct netdev_hw_addr *ha;
9746                 u32 mc_filter[4] = { 0, };
9747                 u32 regidx;
9748                 u32 bit;
9749                 u32 crc;
9750
9751                 netdev_for_each_mc_addr(ha, dev) {
9752                         crc = calc_crc(ha->addr, ETH_ALEN);
9753                         bit = ~crc & 0x7f;
9754                         regidx = (bit & 0x60) >> 5;
9755                         bit &= 0x1f;
9756                         mc_filter[regidx] |= (1 << bit);
9757                 }
9758
9759                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9760                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9761                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9762                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9763         }
9764
9765         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9766                 rx_mode |= RX_MODE_PROMISC;
9767         } else if (!(dev->flags & IFF_PROMISC)) {
9768                 /* Add all entries to the MAC address filter list */
9769                 int i = 0;
9770                 struct netdev_hw_addr *ha;
9771
9772                 netdev_for_each_uc_addr(ha, dev) {
9773                         __tg3_set_one_mac_addr(tp, ha->addr,
9774                                                i + TG3_UCAST_ADDR_IDX(tp));
9775                         i++;
9776                 }
9777         }
9778
9779         if (rx_mode != tp->rx_mode) {
9780                 tp->rx_mode = rx_mode;
9781                 tw32_f(MAC_RX_MODE, rx_mode);
9782                 udelay(10);
9783         }
9784 }
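/* Hash-bit selection above, spelled out: bit = ~crc & 0x7f picks one of
 * 128 filter bits; bits 6:5 select one of the four MAC_HASH_REG_n
 * registers and bits 4:0 the bit within it.  E.g. (illustrative
 * arithmetic only) a CRC whose low byte is 0xb3 gives
 * bit = ~0xb3 & 0x7f = 0x4c, hence regidx = 2 and bit 12 of
 * MAC_HASH_REG_2 is set.
 */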
9785
9786 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9787 {
9788         int i;
9789
9790         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9791                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9792 }
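/* ethtool_rxfh_indir_default() spreads entries round-robin
 * (index % qcnt), so with qcnt = 4 the default table reads
 * 0, 1, 2, 3, 0, 1, 2, 3, ...
 */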
9793
9794 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9795 {
9796         int i;
9797
9798         if (!tg3_flag(tp, SUPPORT_MSIX))
9799                 return;
9800
9801         if (tp->rxq_cnt == 1) {
9802                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9803                 return;
9804         }
9805
9806         /* Validate table against current IRQ count */
9807         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9808                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9809                         break;
9810         }
9811
9812         if (i != TG3_RSS_INDIR_TBL_SIZE)
9813                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9814 }
9815
9816 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9817 {
9818         int i = 0;
9819         u32 reg = MAC_RSS_INDIR_TBL_0;
9820
9821         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9822                 u32 val = tp->rss_ind_tbl[i];
9823                 i++;
9824                 for (; i % 8; i++) {
9825                         val <<= 4;
9826                         val |= tp->rss_ind_tbl[i];
9827                 }
9828                 tw32(reg, val);
9829                 reg += 4;
9830         }
9831 }
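/* Packing sketch (illustrative values): eight 4-bit entries are packed
 * per 32-bit register, first entry in the top nibble.  With
 * rss_ind_tbl[0..7] = {1, 2, 3, 0, 1, 2, 3, 0} the first register is
 * written as 0x12301230.
 */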
9832
9833 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9834 {
9835         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9836                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9837         else
9838                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9839 }
9840
9841 /* tp->lock is held. */
9842 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9843 {
9844         u32 val, rdmac_mode;
9845         int i, err, limit;
9846         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9847
9848         tg3_disable_ints(tp);
9849
9850         tg3_stop_fw(tp);
9851
9852         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9853
9854         if (tg3_flag(tp, INIT_COMPLETE))
9855                 tg3_abort_hw(tp, true);
9856
9857         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9858             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9859                 tg3_phy_pull_config(tp);
9860                 tg3_eee_pull_config(tp, NULL);
9861                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9862         }
9863
9864         /* Enable MAC control of LPI */
9865         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9866                 tg3_setup_eee(tp);
9867
9868         if (reset_phy)
9869                 tg3_phy_reset(tp);
9870
9871         err = tg3_chip_reset(tp);
9872         if (err)
9873                 return err;
9874
9875         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9876
9877         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9878                 val = tr32(TG3_CPMU_CTRL);
9879                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9880                 tw32(TG3_CPMU_CTRL, val);
9881
9882                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9883                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9884                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9885                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9886
9887                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9888                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9889                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9890                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9891
9892                 val = tr32(TG3_CPMU_HST_ACC);
9893                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9894                 val |= CPMU_HST_ACC_MACCLK_6_25;
9895                 tw32(TG3_CPMU_HST_ACC, val);
9896         }
9897
9898         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9899                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9900                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9901                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9902                 tw32(PCIE_PWR_MGMT_THRESH, val);
9903
9904                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9905                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9906
9907                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9908
9909                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9910                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9911         }
9912
9913         if (tg3_flag(tp, L1PLLPD_EN)) {
9914                 u32 grc_mode = tr32(GRC_MODE);
9915
9916                 /* Access the lower 1K of PL PCIE block registers. */
9917                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9918                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9919
9920                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9921                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9922                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9923
9924                 tw32(GRC_MODE, grc_mode);
9925         }
9926
9927         if (tg3_flag(tp, 57765_CLASS)) {
9928                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9929                         u32 grc_mode = tr32(GRC_MODE);
9930
9931                         /* Access the lower 1K of PL PCIE block registers. */
9932                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9933                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9934
9935                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9936                                    TG3_PCIE_PL_LO_PHYCTL5);
9937                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9938                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9939
9940                         tw32(GRC_MODE, grc_mode);
9941                 }
9942
9943                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9944                         u32 grc_mode;
9945
9946                         /* Fix transmit hangs */
9947                         val = tr32(TG3_CPMU_PADRNG_CTL);
9948                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9949                         tw32(TG3_CPMU_PADRNG_CTL, val);
9950
9951                         grc_mode = tr32(GRC_MODE);
9952
9953                         /* Access the lower 1K of DL PCIE block registers. */
9954                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9955                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9956
9957                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9958                                    TG3_PCIE_DL_LO_FTSMAX);
9959                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9960                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9961                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9962
9963                         tw32(GRC_MODE, grc_mode);
9964                 }
9965
9966                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9967                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9968                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9969                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9970         }
9971
9972         /* This works around an issue with Athlon chipsets on
9973          * B3 tigon3 silicon.  This bit has no effect on any
9974          * other revision.  Do not set this bit on PCI Express
9975          * chips, and do not touch the clocks at all if the CPMU is present.
9976          */
9977         if (!tg3_flag(tp, CPMU_PRESENT)) {
9978                 if (!tg3_flag(tp, PCI_EXPRESS))
9979                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9980                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9981         }
9982
9983         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9984             tg3_flag(tp, PCIX_MODE)) {
9985                 val = tr32(TG3PCI_PCISTATE);
9986                 val |= PCISTATE_RETRY_SAME_DMA;
9987                 tw32(TG3PCI_PCISTATE, val);
9988         }
9989
9990         if (tg3_flag(tp, ENABLE_APE)) {
9991                 /* Allow reads and writes to the
9992                  * APE register and memory space.
9993                  */
9994                 val = tr32(TG3PCI_PCISTATE);
9995                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9996                        PCISTATE_ALLOW_APE_SHMEM_WR |
9997                        PCISTATE_ALLOW_APE_PSPACE_WR;
9998                 tw32(TG3PCI_PCISTATE, val);
9999         }
10000
10001         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10002                 /* Enable some hw fixes.  */
10003                 val = tr32(TG3PCI_MSI_DATA);
10004                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10005                 tw32(TG3PCI_MSI_DATA, val);
10006         }
10007
10008         /* Descriptor ring init may make accesses to the
10009          * NIC SRAM area to set up the TX descriptors, so we
10010          * can only do this after the hardware has been
10011          * successfully reset.
10012          */
10013         err = tg3_init_rings(tp);
10014         if (err)
10015                 return err;
10016
10017         if (tg3_flag(tp, 57765_PLUS)) {
10018                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10019                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10020                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10021                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10022                 if (!tg3_flag(tp, 57765_CLASS) &&
10023                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10024                     tg3_asic_rev(tp) != ASIC_REV_5762)
10025                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10026                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10027         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10028                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10029                 /* This value is determined during the probe-time DMA
10030                  * engine test, tg3_test_dma.
10031                  */
10032                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10033         }
10034
10035         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10036                           GRC_MODE_4X_NIC_SEND_RINGS |
10037                           GRC_MODE_NO_TX_PHDR_CSUM |
10038                           GRC_MODE_NO_RX_PHDR_CSUM);
10039         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10040
10041         /* The pseudo-header checksum is done by hardware logic and not
10042          * by the offload processors, so make the chip do the pseudo-
10043          * header checksums on receive.  For transmit it is more
10044          * convenient to do the pseudo-header checksum in software,
10045          * as Linux already does that on transmit for us in all cases.
10046          */
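              /* (The pseudo-header covers the source and destination IP
               * addresses, the protocol number, and the TCP/UDP length.)
               */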
10047         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10048
10049         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10050         if (tp->rxptpctl)
10051                 tw32(TG3_RX_PTP_CTL,
10052                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10053
10054         if (tg3_flag(tp, PTP_CAPABLE))
10055                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10056
10057         tw32(GRC_MODE, tp->grc_mode | val);
10058
10059         /* On one of the AMD platforms, MRRS is restricted to 4000 because
10060          * of a south bridge limitation.  As a workaround, the driver sets
10061          * MRRS to 2048 instead of the default 4096.
10062          */
10063         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10064             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10065                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10066                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10067         }
10068
10069         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
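              /* A prescaler value of 65 divides the 66 MHz clock; assuming the
               * hardware divides by N + 1, this gives a 1 MHz (1 us) timer tick.
               */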
10070         val = tr32(GRC_MISC_CFG);
10071         val &= ~0xff;
10072         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10073         tw32(GRC_MISC_CFG, val);
10074
10075         /* Initialize MBUF/DESC pool. */
10076         if (tg3_flag(tp, 5750_PLUS)) {
10077                 /* Do nothing.  */
10078         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10079                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10080                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10081                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10082                 else
10083                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10084                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10085                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10086         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10087                 int fw_len;
10088
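                      /* Round the firmware length up to a 128-byte boundary;
                       * the mbuf pool starts immediately after the TSO
                       * firmware image in NIC SRAM.
                       */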
10089                 fw_len = tp->fw_len;
10090                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10091                 tw32(BUFMGR_MB_POOL_ADDR,
10092                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10093                 tw32(BUFMGR_MB_POOL_SIZE,
10094                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10095         }
10096
10097         if (tp->dev->mtu <= ETH_DATA_LEN) {
10098                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10099                      tp->bufmgr_config.mbuf_read_dma_low_water);
10100                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10101                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10102                 tw32(BUFMGR_MB_HIGH_WATER,
10103                      tp->bufmgr_config.mbuf_high_water);
10104         } else {
10105                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10106                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10107                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10108                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10109                 tw32(BUFMGR_MB_HIGH_WATER,
10110                      tp->bufmgr_config.mbuf_high_water_jumbo);
10111         }
10112         tw32(BUFMGR_DMA_LOW_WATER,
10113              tp->bufmgr_config.dma_low_water);
10114         tw32(BUFMGR_DMA_HIGH_WATER,
10115              tp->bufmgr_config.dma_high_water);
10116
10117         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10118         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10119                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10120         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10121             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10122             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10123             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10124                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10125         tw32(BUFMGR_MODE, val);
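              /* Poll for up to ~20 ms (2000 iterations x 10 us) for the
               * buffer manager to report itself enabled.
               */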
10126         for (i = 0; i < 2000; i++) {
10127                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10128                         break;
10129                 udelay(10);
10130         }
10131         if (i >= 2000) {
10132                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10133                 return -ENODEV;
10134         }
10135
10136         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10137                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10138
10139         tg3_setup_rxbd_thresholds(tp);
10140
10141         /* Initialize TG3_BDINFO's at:
10142          *  RCVDBDI_STD_BD:     standard eth size rx ring
10143          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10144          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10145          *
10146          * like so:
10147          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10148          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10149          *                              ring attribute flags
10150          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10151          *
10152          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10153          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10154          *
10155          * The size of each ring is fixed in the firmware, but the location is
10156          * configurable.
10157          */
10158         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10159              ((u64) tpr->rx_std_mapping >> 32));
10160         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10161              ((u64) tpr->rx_std_mapping & 0xffffffff));
10162         if (!tg3_flag(tp, 5717_PLUS))
10163                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10164                      NIC_SRAM_RX_BUFFER_DESC);
10165
10166         /* Disable the mini ring */
10167         if (!tg3_flag(tp, 5705_PLUS))
10168                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10169                      BDINFO_FLAGS_DISABLED);
10170
10171         /* Program the jumbo buffer descriptor ring control
10172          * blocks on those devices that have them.
10173          */
10174         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10175             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10176
10177                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10178                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10179                              ((u64) tpr->rx_jmb_mapping >> 32));
10180                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10181                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10182                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10183                               BDINFO_FLAGS_MAXLEN_SHIFT;
10184                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10185                              val | BDINFO_FLAGS_USE_EXT_RECV);
10186                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10187                             tg3_flag(tp, 57765_CLASS) ||
10188                             tg3_asic_rev(tp) == ASIC_REV_5762)
10189                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10190                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10191                 } else {
10192                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10193                              BDINFO_FLAGS_DISABLED);
10194                 }
10195
10196                 if (tg3_flag(tp, 57765_PLUS)) {
10197                         val = TG3_RX_STD_RING_SIZE(tp);
10198                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10199                         val |= (TG3_RX_STD_DMA_SZ << 2);
10200                 } else
10201                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10202         } else
10203                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10204
10205         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10206
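              /* Publish the initial producer indices so the hardware can
               * start consuming the rx buffers posted by tg3_init_rings().
               */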
10207         tpr->rx_std_prod_idx = tp->rx_pending;
10208         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10209
10210         tpr->rx_jmb_prod_idx =
10211                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10212         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10213
10214         tg3_rings_reset(tp);
10215
10216         /* Initialize MAC address and backoff seed. */
10217         __tg3_set_mac_addr(tp, false);
10218
10219         /* MTU + ethernet header + FCS + optional VLAN tag */
10220         tw32(MAC_RX_MTU_SIZE,
10221              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10222
10223         /* The slot time is changed by tg3_setup_phy if we
10224          * run at gigabit with half duplex.
10225          */
10226         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10227               (6 << TX_LENGTHS_IPG_SHIFT) |
10228               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10229
10230         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10231             tg3_asic_rev(tp) == ASIC_REV_5762)
10232                 val |= tr32(MAC_TX_LENGTHS) &
10233                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10234                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10235
10236         tw32(MAC_TX_LENGTHS, val);
10237
10238         /* Receive rules. */
10239         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10240         tw32(RCVLPC_CONFIG, 0x0181);
10241
10242         /* Calculate RDMAC_MODE setting early, we need it to determine
10243          * the RCVLPC_STATE_ENABLE mask.
10244          */
10245         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10246                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10247                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10248                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10249                       RDMAC_MODE_LNGREAD_ENAB);
10250
10251         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10252                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10253
10254         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10255             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10256             tg3_asic_rev(tp) == ASIC_REV_57780)
10257                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10258                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10259                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10260
10261         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10262             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10263                 if (tg3_flag(tp, TSO_CAPABLE)) {
10264                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10265                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10266                            !tg3_flag(tp, IS_5788)) {
10267                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10268                 }
10269         }
10270
10271         if (tg3_flag(tp, PCI_EXPRESS))
10272                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10273
10274         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10275                 tp->dma_limit = 0;
10276                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10277                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10278                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10279                 }
10280         }
10281
10282         if (tg3_flag(tp, HW_TSO_1) ||
10283             tg3_flag(tp, HW_TSO_2) ||
10284             tg3_flag(tp, HW_TSO_3))
10285                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10286
10287         if (tg3_flag(tp, 57765_PLUS) ||
10288             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10289             tg3_asic_rev(tp) == ASIC_REV_57780)
10290                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10291
10292         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10293             tg3_asic_rev(tp) == ASIC_REV_5762)
10294                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10295
10296         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10297             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10298             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10299             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10300             tg3_flag(tp, 57765_PLUS)) {
10301                 u32 tgtreg;
10302
10303                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10304                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10305                 else
10306                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10307
10308                 val = tr32(tgtreg);
10309                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10310                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10311                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10312                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10313                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10314                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10315                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10316                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10317                 }
10318                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10319         }
10320
10321         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10322             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10323             tg3_asic_rev(tp) == ASIC_REV_5762) {
10324                 u32 tgtreg;
10325
10326                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10327                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10328                 else
10329                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10330
10331                 val = tr32(tgtreg);
10332                 tw32(tgtreg, val |
10333                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10334                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10335         }
10336
10337         /* Receive/send statistics. */
10338         if (tg3_flag(tp, 5750_PLUS)) {
10339                 val = tr32(RCVLPC_STATS_ENABLE);
10340                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10341                 tw32(RCVLPC_STATS_ENABLE, val);
10342         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10343                    tg3_flag(tp, TSO_CAPABLE)) {
10344                 val = tr32(RCVLPC_STATS_ENABLE);
10345                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10346                 tw32(RCVLPC_STATS_ENABLE, val);
10347         } else {
10348                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10349         }
10350         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10351         tw32(SNDDATAI_STATSENAB, 0xffffff);
10352         tw32(SNDDATAI_STATSCTRL,
10353              (SNDDATAI_SCTRL_ENABLE |
10354               SNDDATAI_SCTRL_FASTUPD));
10355
10356         /* Setup host coalescing engine. */
10357         tw32(HOSTCC_MODE, 0);
10358         for (i = 0; i < 2000; i++) {
10359                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10360                         break;
10361                 udelay(10);
10362         }
10363
10364         __tg3_set_coalesce(tp, &tp->coal);
10365
10366         if (!tg3_flag(tp, 5705_PLUS)) {
10367                 /* Status/statistics block address.  See tg3_timer,
10368                  * the tg3_periodic_fetch_stats call there, and
10369                  * tg3_get_stats to see how this works for 5705/5750 chips.
10370                  */
10371                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10372                      ((u64) tp->stats_mapping >> 32));
10373                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10374                      ((u64) tp->stats_mapping & 0xffffffff));
10375                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10376
10377                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10378
10379                 /* Clear statistics and status block memory areas */
10380                 for (i = NIC_SRAM_STATS_BLK;
10381                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10382                      i += sizeof(u32)) {
10383                         tg3_write_mem(tp, i, 0);
10384                         udelay(40);
10385                 }
10386         }
10387
10388         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10389
10390         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10391         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10392         if (!tg3_flag(tp, 5705_PLUS))
10393                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10394
10395         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10396                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10397                 /* reset to prevent losing 1st rx packet intermittently */
10398                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10399                 udelay(10);
10400         }
10401
10402         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10403                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10404                         MAC_MODE_FHDE_ENABLE;
10405         if (tg3_flag(tp, ENABLE_APE))
10406                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10407         if (!tg3_flag(tp, 5705_PLUS) &&
10408             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10409             tg3_asic_rev(tp) != ASIC_REV_5700)
10410                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10411         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10412         udelay(40);
10413
10414         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10415          * If TG3_FLAG_IS_NIC is zero, we should read the
10416          * register to preserve the GPIO settings for LOMs. The GPIOs,
10417          * whether used as inputs or outputs, are set by boot code after
10418          * reset.
10419          */
10420         if (!tg3_flag(tp, IS_NIC)) {
10421                 u32 gpio_mask;
10422
10423                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10424                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10425                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10426
10427                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10428                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10429                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10430
10431                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10432                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10433
10434                 tp->grc_local_ctrl &= ~gpio_mask;
10435                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10436
10437                 /* GPIO1 must be driven high for eeprom write protect */
10438                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10439                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10440                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10441         }
10442         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10443         udelay(100);
10444
10445         if (tg3_flag(tp, USING_MSIX)) {
10446                 val = tr32(MSGINT_MODE);
10447                 val |= MSGINT_MODE_ENABLE;
10448                 if (tp->irq_cnt > 1)
10449                         val |= MSGINT_MODE_MULTIVEC_EN;
10450                 if (!tg3_flag(tp, 1SHOT_MSI))
10451                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10452                 tw32(MSGINT_MODE, val);
10453         }
10454
10455         if (!tg3_flag(tp, 5705_PLUS)) {
10456                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10457                 udelay(40);
10458         }
10459
10460         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10461                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10462                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10463                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10464                WDMAC_MODE_LNGREAD_ENAB);
10465
10466         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10467             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10468                 if (tg3_flag(tp, TSO_CAPABLE) &&
10469                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10470                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10471                         /* nothing */
10472                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10473                            !tg3_flag(tp, IS_5788)) {
10474                         val |= WDMAC_MODE_RX_ACCEL;
10475                 }
10476         }
10477
10478         /* Enable host coalescing bug fix */
10479         if (tg3_flag(tp, 5755_PLUS))
10480                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10481
10482         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10483                 val |= WDMAC_MODE_BURST_ALL_DATA;
10484
10485         tw32_f(WDMAC_MODE, val);
10486         udelay(40);
10487
10488         if (tg3_flag(tp, PCIX_MODE)) {
10489                 u16 pcix_cmd;
10490
10491                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10492                                      &pcix_cmd);
10493                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10494                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10495                         pcix_cmd |= PCI_X_CMD_READ_2K;
10496                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10497                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10498                         pcix_cmd |= PCI_X_CMD_READ_2K;
10499                 }
10500                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10501                                       pcix_cmd);
10502         }
10503
10504         tw32_f(RDMAC_MODE, rdmac_mode);
10505         udelay(40);
10506
10507         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10508             tg3_asic_rev(tp) == ASIC_REV_5720) {
10509                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10510                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10511                                 break;
10512                 }
10513                 if (i < TG3_NUM_RDMA_CHANNELS) {
10514                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10515                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10516                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10517                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10518                 }
10519         }
10520
10521         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10522         if (!tg3_flag(tp, 5705_PLUS))
10523                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10524
10525         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10526                 tw32(SNDDATAC_MODE,
10527                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10528         else
10529                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10530
10531         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10532         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10533         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10534         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10535                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10536         tw32(RCVDBDI_MODE, val);
10537         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10538         if (tg3_flag(tp, HW_TSO_1) ||
10539             tg3_flag(tp, HW_TSO_2) ||
10540             tg3_flag(tp, HW_TSO_3))
10541                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10542         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10543         if (tg3_flag(tp, ENABLE_TSS))
10544                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10545         tw32(SNDBDI_MODE, val);
10546         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10547
10548         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10549                 err = tg3_load_5701_a0_firmware_fix(tp);
10550                 if (err)
10551                         return err;
10552         }
10553
10554         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10555                 /* Ignore any errors from the firmware download.  If the
10556                  * download fails, the device will operate with EEE disabled.
10557                  */
10558                 tg3_load_57766_firmware(tp);
10559         }
10560
10561         if (tg3_flag(tp, TSO_CAPABLE)) {
10562                 err = tg3_load_tso_firmware(tp);
10563                 if (err)
10564                         return err;
10565         }
10566
10567         tp->tx_mode = TX_MODE_ENABLE;
10568
10569         if (tg3_flag(tp, 5755_PLUS) ||
10570             tg3_asic_rev(tp) == ASIC_REV_5906)
10571                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10572
10573         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10574             tg3_asic_rev(tp) == ASIC_REV_5762) {
10575                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10576                 tp->tx_mode &= ~val;
10577                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10578         }
10579
10580         tw32_f(MAC_TX_MODE, tp->tx_mode);
10581         udelay(100);
10582
10583         if (tg3_flag(tp, ENABLE_RSS)) {
10584                 u32 rss_key[10];
10585
10586                 tg3_rss_write_indir_tbl(tp);
10587
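                      /* Fill the 40-byte RSS hash key from the kernel's
                       * global key material, then program it into the ten
                       * hash key registers below.
                       */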
10588                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10589
10590                 for (i = 0; i < 10; i++)
10591                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10592         }
10593
10594         tp->rx_mode = RX_MODE_ENABLE;
10595         if (tg3_flag(tp, 5755_PLUS))
10596                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10597
10598         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10599                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10600
10601         if (tg3_flag(tp, ENABLE_RSS))
10602                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10603                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10604                                RX_MODE_RSS_IPV6_HASH_EN |
10605                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10606                                RX_MODE_RSS_IPV4_HASH_EN |
10607                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10608
10609         tw32_f(MAC_RX_MODE, tp->rx_mode);
10610         udelay(10);
10611
10612         tw32(MAC_LED_CTRL, tp->led_ctrl);
10613
10614         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10615         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10616                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10617                 udelay(10);
10618         }
10619         tw32_f(MAC_RX_MODE, tp->rx_mode);
10620         udelay(10);
10621
10622         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10623                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10624                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10625                         /* Set drive transmission level to 1.2V  */
10626                         /* only if the signal pre-emphasis bit is not set  */
10627                         val = tr32(MAC_SERDES_CFG);
10628                         val &= 0xfffff000;
10629                         val |= 0x880;
10630                         tw32(MAC_SERDES_CFG, val);
10631                 }
10632                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10633                         tw32(MAC_SERDES_CFG, 0x616000);
10634         }
10635
10636         /* Prevent chip from dropping frames when flow control
10637          * is enabled.
10638          */
10639         if (tg3_flag(tp, 57765_CLASS))
10640                 val = 1;
10641         else
10642                 val = 2;
10643         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10644
10645         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10646             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10647                 /* Use hardware link auto-negotiation */
10648                 tg3_flag_set(tp, HW_AUTONEG);
10649         }
10650
10651         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10652             tg3_asic_rev(tp) == ASIC_REV_5714) {
10653                 u32 tmp;
10654
10655                 tmp = tr32(SERDES_RX_CTRL);
10656                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10657                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10658                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10659                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10660         }
10661
10662         if (!tg3_flag(tp, USE_PHYLIB)) {
10663                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10664                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10665
10666                 err = tg3_setup_phy(tp, false);
10667                 if (err)
10668                         return err;
10669
10670                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10671                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10672                         u32 tmp;
10673
10674                         /* Clear CRC stats. */
10675                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10676                                 tg3_writephy(tp, MII_TG3_TEST1,
10677                                              tmp | MII_TG3_TEST1_CRC_EN);
10678                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10679                         }
10680                 }
10681         }
10682
10683         __tg3_set_rx_mode(tp->dev);
10684
10685         /* Initialize receive rules. */
10686         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10687         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10688         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10689         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10690
10691         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10692                 limit = 8;
10693         else
10694                 limit = 16;
10695         if (tg3_flag(tp, ENABLE_ASF))
10696                 limit -= 4;
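              /* The switch enters at "case limit" and falls through, clearing
               * every rule from limit - 1 down to rule 4.  When ASF is
               * enabled, the top four rules are skipped, presumably because
               * they are reserved for the management firmware.
               */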
10697         switch (limit) {
10698         case 16:
10699                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10700                 fallthrough;
10701         case 15:
10702                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10703                 fallthrough;
10704         case 14:
10705                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10706                 fallthrough;
10707         case 13:
10708                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10709                 fallthrough;
10710         case 12:
10711                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10712                 fallthrough;
10713         case 11:
10714                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10715                 fallthrough;
10716         case 10:
10717                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10718                 fallthrough;
10719         case 9:
10720                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10721                 fallthrough;
10722         case 8:
10723                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10724                 fallthrough;
10725         case 7:
10726                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10727                 fallthrough;
10728         case 6:
10729                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10730                 fallthrough;
10731         case 5:
10732                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10733                 fallthrough;
10734         case 4:
10735                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10736         case 3:
10737                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10738         case 2:
10739         case 1:
10740
10741         default:
10742                 break;
10743         }
10744
10745         if (tg3_flag(tp, ENABLE_APE))
10746                 /* Write our heartbeat update interval to APE. */
10747                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10748                                 APE_HOST_HEARTBEAT_INT_5SEC);
10749
10750         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10751
10752         return 0;
10753 }
10754
10755 /* Called at device open time to get the chip ready for
10756  * packet processing.  Invoked with tp->lock held.
10757  */
10758 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10759 {
10760         /* Chip may have been just powered on. If so, the boot code may still
10761          * be running initialization. Wait for it to finish to avoid races in
10762          * accessing the hardware.
10763          */
10764         tg3_enable_register_access(tp);
10765         tg3_poll_fw(tp);
10766
10767         tg3_switch_clocks(tp);
10768
10769         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10770
10771         return tg3_reset_hw(tp, reset_phy);
10772 }
10773
10774 #ifdef CONFIG_TIGON3_HWMON
10775 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10776 {
10777         u32 off, len = TG3_OCIR_LEN;
10778         int i;
10779
10780         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10781                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10782
10783                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10784                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10785                         memset(ocir, 0, len);
10786         }
10787 }
10788
10789 /* sysfs attributes for hwmon */
10790 static ssize_t tg3_show_temp(struct device *dev,
10791                              struct device_attribute *devattr, char *buf)
10792 {
10793         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10794         struct tg3 *tp = dev_get_drvdata(dev);
10795         u32 temperature;
10796
10797         spin_lock_bh(&tp->lock);
10798         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10799                                 sizeof(temperature));
10800         spin_unlock_bh(&tp->lock);
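              /* The APE reports the temperature in whole degrees Celsius;
               * the hwmon sysfs interface expects millidegrees, hence the
               * multiplication by 1000.
               */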
10801         return sprintf(buf, "%u\n", temperature * 1000);
10802 }
10803
10804
10805 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10806                           TG3_TEMP_SENSOR_OFFSET);
10807 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10808                           TG3_TEMP_CAUTION_OFFSET);
10809 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10810                           TG3_TEMP_MAX_OFFSET);
10811
10812 static struct attribute *tg3_attrs[] = {
10813         &sensor_dev_attr_temp1_input.dev_attr.attr,
10814         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10815         &sensor_dev_attr_temp1_max.dev_attr.attr,
10816         NULL
10817 };
10818 ATTRIBUTE_GROUPS(tg3);
10819
10820 static void tg3_hwmon_close(struct tg3 *tp)
10821 {
10822         if (tp->hwmon_dev) {
10823                 hwmon_device_unregister(tp->hwmon_dev);
10824                 tp->hwmon_dev = NULL;
10825         }
10826 }
10827
10828 static void tg3_hwmon_open(struct tg3 *tp)
10829 {
10830         int i;
10831         u32 size = 0;
10832         struct pci_dev *pdev = tp->pdev;
10833         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10834
10835         tg3_sd_scan_scratchpad(tp, ocirs);
10836
10837         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10838                 if (!ocirs[i].src_data_length)
10839                         continue;
10840
10841                 size += ocirs[i].src_hdr_length;
10842                 size += ocirs[i].src_data_length;
10843         }
10844
10845         if (!size)
10846                 return;
10847
10848         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10849                                                           tp, tg3_groups);
10850         if (IS_ERR(tp->hwmon_dev)) {
10851                 tp->hwmon_dev = NULL;
10852                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10853         }
10854 }
10855 #else
10856 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10857 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10858 #endif /* CONFIG_TIGON3_HWMON */
10859
10860
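      /* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
       * software counter.  The unsigned comparison detects overflow of the
       * low word and carries one into the high word.
       */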
10861 #define TG3_STAT_ADD32(PSTAT, REG) \
10862 do {    u32 __val = tr32(REG); \
10863         (PSTAT)->low += __val; \
10864         if ((PSTAT)->low < __val) \
10865                 (PSTAT)->high += 1; \
10866 } while (0)
10867
10868 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10869 {
10870         struct tg3_hw_stats *sp = tp->hw_stats;
10871
10872         if (!tp->link_up)
10873                 return;
10874
10875         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10876         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10877         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10878         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10879         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10880         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10881         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10882         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10883         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10884         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10885         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10886         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10887         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10888         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10889                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10890                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10891                 u32 val;
10892
10893                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10894                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10895                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10896                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10897         }
10898
10899         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10900         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10901         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10902         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10903         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10904         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10905         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10906         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10907         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10908         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10909         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10910         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10911         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10912         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10913
10914         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10915         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10916             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10917             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10918             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10919                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10920         } else {
10921                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10922                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10923                 if (val) {
10924                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10925                         sp->rx_discards.low += val;
10926                         if (sp->rx_discards.low < val)
10927                                 sp->rx_discards.high += 1;
10928                 }
10929                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10930         }
10931         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10932 }
10933
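      /* Work around occasionally missed MSIs: if a vector has work pending
       * but its consumer indices have not advanced since the previous check,
       * assume the interrupt was lost and invoke the handler directly.
       */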
10934 static void tg3_chk_missed_msi(struct tg3 *tp)
10935 {
10936         u32 i;
10937
10938         for (i = 0; i < tp->irq_cnt; i++) {
10939                 struct tg3_napi *tnapi = &tp->napi[i];
10940
10941                 if (tg3_has_work(tnapi)) {
10942                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10943                             tnapi->last_tx_cons == tnapi->tx_cons) {
10944                                 if (tnapi->chk_msi_cnt < 1) {
10945                                         tnapi->chk_msi_cnt++;
10946                                         return;
10947                                 }
10948                                 tg3_msi(0, tnapi);
10949                         }
10950                 }
10951                 tnapi->chk_msi_cnt = 0;
10952                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10953                 tnapi->last_tx_cons = tnapi->tx_cons;
10954         }
10955 }
10956
10957 static void tg3_timer(struct timer_list *t)
10958 {
10959         struct tg3 *tp = from_timer(tp, t, timer);
10960
10961         spin_lock(&tp->lock);
10962
10963         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10964                 spin_unlock(&tp->lock);
10965                 goto restart_timer;
10966         }
10967
10968         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10969             tg3_flag(tp, 57765_CLASS))
10970                 tg3_chk_missed_msi(tp);
10971
10972         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10973                 /* BCM4785: Flush posted writes from GbE to host memory. */
10974                 tr32(HOSTCC_MODE);
10975         }
10976
10977         if (!tg3_flag(tp, TAGGED_STATUS)) {
10978                 /* All of this garbage exists because, when using non-tagged
10979                  * IRQ status, the mailbox/status_block protocol the chip
10980                  * uses with the CPU is race prone.
10981                  */
10982                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10983                         tw32(GRC_LOCAL_CTRL,
10984                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10985                 } else {
10986                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10987                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10988                 }
10989
10990                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10991                         spin_unlock(&tp->lock);
10992                         tg3_reset_task_schedule(tp);
10993                         goto restart_timer;
10994                 }
10995         }
10996
10997         /* This part only runs once per second. */
10998         if (!--tp->timer_counter) {
10999                 if (tg3_flag(tp, 5705_PLUS))
11000                         tg3_periodic_fetch_stats(tp);
11001
11002                 if (tp->setlpicnt && !--tp->setlpicnt)
11003                         tg3_phy_eee_enable(tp);
11004
11005                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11006                         u32 mac_stat;
11007                         int phy_event;
11008
11009                         mac_stat = tr32(MAC_STATUS);
11010
11011                         phy_event = 0;
11012                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11013                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11014                                         phy_event = 1;
11015                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11016                                 phy_event = 1;
11017
11018                         if (phy_event)
11019                                 tg3_setup_phy(tp, false);
11020                 } else if (tg3_flag(tp, POLL_SERDES)) {
11021                         u32 mac_stat = tr32(MAC_STATUS);
11022                         int need_setup = 0;
11023
11024                         if (tp->link_up &&
11025                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11026                                 need_setup = 1;
11027                         }
11028                         if (!tp->link_up &&
11029                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11030                                          MAC_STATUS_SIGNAL_DET))) {
11031                                 need_setup = 1;
11032                         }
11033                         if (need_setup) {
11034                                 if (!tp->serdes_counter) {
11035                                         tw32_f(MAC_MODE,
11036                                              (tp->mac_mode &
11037                                               ~MAC_MODE_PORT_MODE_MASK));
11038                                         udelay(40);
11039                                         tw32_f(MAC_MODE, tp->mac_mode);
11040                                         udelay(40);
11041                                 }
11042                                 tg3_setup_phy(tp, false);
11043                         }
11044                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11045                            tg3_flag(tp, 5780_CLASS)) {
11046                         tg3_serdes_parallel_detect(tp);
11047                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11048                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11049                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11050                                          TG3_CPMU_STATUS_LINK_MASK);
11051
11052                         if (link_up != tp->link_up)
11053                                 tg3_setup_phy(tp, false);
11054                 }
11055
11056                 tp->timer_counter = tp->timer_multiplier;
11057         }
11058
11059         /* Heartbeat is only sent once every 2 seconds.
11060          *
11061          * The heartbeat is to tell the ASF firmware that the host
11062          * driver is still alive.  In the event that the OS crashes,
11063          * ASF needs to reset the hardware to free up the FIFO space
11064          * that may be filled with rx packets destined for the host.
11065          * If the FIFO is full, ASF will no longer function properly.
11066          *
11067          * Unintended resets have been reported on real-time kernels,
11068          * where the timer doesn't run on time.  Netpoll has the
11069          * same problem.
11070          *
11071          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11072          * to check the ring condition when the heartbeat is expiring
11073          * before doing the reset.  This will prevent most unintended
11074          * resets.
11075          */
11076         if (!--tp->asf_counter) {
11077                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11078                         tg3_wait_for_event_ack(tp);
11079
11080                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11081                                       FWCMD_NICDRV_ALIVE3);
11082                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11083                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11084                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11085
11086                         tg3_generate_fw_event(tp);
11087                 }
11088                 tp->asf_counter = tp->asf_multiplier;
11089         }
11090
11091         /* Update the APE heartbeat every 5 seconds. */
11092         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11093
11094         spin_unlock(&tp->lock);
11095
11096 restart_timer:
11097         tp->timer.expires = jiffies + tp->timer_offset;
11098         add_timer(&tp->timer);
11099 }
11100
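      /* With tagged status (except on the 5717 and the 57765 class), the
       * timer only needs to fire once per second; otherwise it fires ten
       * times per second so the status-block and missed-MSI checks in
       * tg3_timer() run often enough, while timer_multiplier still gates
       * the once-per-second work.
       */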
11101 static void tg3_timer_init(struct tg3 *tp)
11102 {
11103         if (tg3_flag(tp, TAGGED_STATUS) &&
11104             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11105             !tg3_flag(tp, 57765_CLASS))
11106                 tp->timer_offset = HZ;
11107         else
11108                 tp->timer_offset = HZ / 10;
11109
11110         BUG_ON(tp->timer_offset > HZ);
11111
11112         tp->timer_multiplier = (HZ / tp->timer_offset);
11113         tp->asf_multiplier = (HZ / tp->timer_offset) *
11114                              TG3_FW_UPDATE_FREQ_SEC;
11115
11116         timer_setup(&tp->timer, tg3_timer, 0);
11117 }
11118
11119 static void tg3_timer_start(struct tg3 *tp)
11120 {
11121         tp->asf_counter   = tp->asf_multiplier;
11122         tp->timer_counter = tp->timer_multiplier;
11123
11124         tp->timer.expires = jiffies + tp->timer_offset;
11125         add_timer(&tp->timer);
11126 }
11127
11128 static void tg3_timer_stop(struct tg3 *tp)
11129 {
11130         del_timer_sync(&tp->timer);
11131 }
11132
11133 /* Restart hardware after configuration changes, self-test, etc.
11134  * Invoked with tp->lock held.
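       * On failure, tp->lock is dropped and re-acquired around the cleanup
       * path (hence the sparse __releases/__acquires annotations below).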
11135  */
11136 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11137         __releases(tp->lock)
11138         __acquires(tp->lock)
11139 {
11140         int err;
11141
11142         err = tg3_init_hw(tp, reset_phy);
11143         if (err) {
11144                 netdev_err(tp->dev,
11145                            "Failed to re-initialize device, aborting\n");
11146                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147                 tg3_full_unlock(tp);
11148                 tg3_timer_stop(tp);
11149                 tp->irq_sync = 0;
11150                 tg3_napi_enable(tp);
11151                 dev_close(tp->dev);
11152                 tg3_full_lock(tp, 0);
11153         }
11154         return err;
11155 }
11156
11157 static void tg3_reset_task(struct work_struct *work)
11158 {
11159         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11160         int err;
11161
11162         rtnl_lock();
11163         tg3_full_lock(tp, 0);
11164
11165         if (!netif_running(tp->dev)) {
11166                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11167                 tg3_full_unlock(tp);
11168                 rtnl_unlock();
11169                 return;
11170         }
11171
11172         tg3_full_unlock(tp);
11173
11174         tg3_phy_stop(tp);
11175
11176         tg3_netif_stop(tp);
11177
11178         tg3_full_lock(tp, 1);
11179
11180         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11181                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11182                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11183                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11184                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11185         }
11186
11187         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11188         err = tg3_init_hw(tp, true);
11189         if (err) {
11190                 tg3_full_unlock(tp);
11191                 tp->irq_sync = 0;
11192                 tg3_napi_enable(tp);
11193                 /* Clear this flag so that tg3_reset_task_cancel() will not
11194                  * call cancel_work_sync() and wait forever.
11195                  */
11196                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11197                 dev_close(tp->dev);
11198                 goto out;
11199         }
11200
11201         tg3_netif_start(tp);
11202         tg3_full_unlock(tp);
11203         tg3_phy_start(tp);
11204         tg3_flag_clear(tp, RESET_TASK_PENDING);
11205 out:
11206         rtnl_unlock();
11207 }
11208
11209 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11210 {
11211         irq_handler_t fn;
11212         unsigned long flags;
11213         char *name;
11214         struct tg3_napi *tnapi = &tp->napi[irq_num];
11215
11216         if (tp->irq_cnt == 1)
11217                 name = tp->dev->name;
11218         else {
11219                 name = &tnapi->irq_lbl[0];
11220                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11221                         snprintf(name, IFNAMSIZ,
11222                                  "%s-txrx-%d", tp->dev->name, irq_num);
11223                 else if (tnapi->tx_buffers)
11224                         snprintf(name, IFNAMSIZ,
11225                                  "%s-tx-%d", tp->dev->name, irq_num);
11226                 else if (tnapi->rx_rcb)
11227                         snprintf(name, IFNAMSIZ,
11228                                  "%s-rx-%d", tp->dev->name, irq_num);
11229                 else
11230                         snprintf(name, IFNAMSIZ,
11231                                  "%s-%d", tp->dev->name, irq_num);
11232                 name[IFNAMSIZ-1] = 0;
11233         }
11234
11235         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11236                 fn = tg3_msi;
11237                 if (tg3_flag(tp, 1SHOT_MSI))
11238                         fn = tg3_msi_1shot;
11239                 flags = 0;
11240         } else {
11241                 fn = tg3_interrupt;
11242                 if (tg3_flag(tp, TAGGED_STATUS))
11243                         fn = tg3_interrupt_tagged;
11244                 flags = IRQF_SHARED;
11245         }
11246
11247         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11248 }
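/* For a hypothetical device "eth0" with multiple vectors, the naming above
 * yields e.g. "eth0-txrx-1" for a combined TX/RX vector, "eth0-rx-2" for an
 * RX-only vector, and "eth0-0" for a vector with neither ring attached, so
 * /proc/interrupts shows what each vector services.  With a single vector
 * the plain interface name is used.
 */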
11249
11250 static int tg3_test_interrupt(struct tg3 *tp)
11251 {
11252         struct tg3_napi *tnapi = &tp->napi[0];
11253         struct net_device *dev = tp->dev;
11254         int err, i, intr_ok = 0;
11255         u32 val;
11256
11257         if (!netif_running(dev))
11258                 return -ENODEV;
11259
11260         tg3_disable_ints(tp);
11261
11262         free_irq(tnapi->irq_vec, tnapi);
11263
11264         /*
11265          * Turn off MSI one shot mode.  Otherwise this test has no
11266          * way to observe whether the interrupt was delivered.
11267          */
11268         if (tg3_flag(tp, 57765_PLUS)) {
11269                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11270                 tw32(MSGINT_MODE, val);
11271         }
11272
11273         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11274                           IRQF_SHARED, dev->name, tnapi);
11275         if (err)
11276                 return err;
11277
11278         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11279         tg3_enable_ints(tp);
11280
11281         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11282                tnapi->coal_now);
11283
11284         for (i = 0; i < 5; i++) {
11285                 u32 int_mbox, misc_host_ctrl;
11286
11287                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11288                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11289
11290                 if ((int_mbox != 0) ||
11291                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11292                         intr_ok = 1;
11293                         break;
11294                 }
11295
11296                 if (tg3_flag(tp, 57765_PLUS) &&
11297                     tnapi->hw_status->status_tag != tnapi->last_tag)
11298                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11299
11300                 msleep(10);
11301         }
11302
11303         tg3_disable_ints(tp);
11304
11305         free_irq(tnapi->irq_vec, tnapi);
11306
11307         err = tg3_request_irq(tp, 0);
11308
11309         if (err)
11310                 return err;
11311
11312         if (intr_ok) {
11313                 /* Reenable MSI one shot mode. */
11314                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11315                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11316                         tw32(MSGINT_MODE, val);
11317                 }
11318                 return 0;
11319         }
11320
11321         return -EIO;
11322 }
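/* The poll above declares success if either the interrupt mailbox reads
 * back non-zero or MISC_HOST_CTRL shows PCI interrupts masked again (the
 * test ISR is expected to disable interrupts once it has run, which sets
 * that mask bit), sampling every 10 ms for up to ~50 ms.
 */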
11323
11324 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11325  * INTx mode is successfully restored.
11326  */
11327 static int tg3_test_msi(struct tg3 *tp)
11328 {
11329         int err;
11330         u16 pci_cmd;
11331
11332         if (!tg3_flag(tp, USING_MSI))
11333                 return 0;
11334
11335         /* Turn off SERR reporting in case MSI terminates with Master
11336          * Abort.
11337          */
11338         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11339         pci_write_config_word(tp->pdev, PCI_COMMAND,
11340                               pci_cmd & ~PCI_COMMAND_SERR);
11341
11342         err = tg3_test_interrupt(tp);
11343
11344         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11345
11346         if (!err)
11347                 return 0;
11348
11349         /* other failures */
11350         if (err != -EIO)
11351                 return err;
11352
11353         /* MSI test failed, go back to INTx mode */
11354         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11355                     "to INTx mode. Please report this failure to the PCI "
11356                     "maintainer and include system chipset information\n");
11357
11358         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11359
11360         pci_disable_msi(tp->pdev);
11361
11362         tg3_flag_clear(tp, USING_MSI);
11363         tp->napi[0].irq_vec = tp->pdev->irq;
11364
11365         err = tg3_request_irq(tp, 0);
11366         if (err)
11367                 return err;
11368
11369         /* Need to reset the chip because the MSI cycle may have terminated
11370          * with Master Abort.
11371          */
11372         tg3_full_lock(tp, 1);
11373
11374         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11375         err = tg3_init_hw(tp, true);
11376
11377         tg3_full_unlock(tp);
11378
11379         if (err)
11380                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11381
11382         return err;
11383 }
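/* The fallback above follows the usual MSI-recovery pattern: free the MSI
 * vector, pci_disable_msi(), fall back to the legacy pin (tp->pdev->irq),
 * then fully reset the chip, since a Master Abort during the failed MSI
 * cycle may have left the device in an undefined state.
 */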
11384
11385 static int tg3_request_firmware(struct tg3 *tp)
11386 {
11387         const struct tg3_firmware_hdr *fw_hdr;
11388
11389         if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11390                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11391                            tp->fw_needed);
11392                 return -ENOENT;
11393         }
11394
11395         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11396
11397         /* Firmware blob starts with version numbers, followed by
11398          * start address and the _full_ length including BSS sections
11399          * (which must be at least as long as the actual data, of course).
11400          */
11401
11402         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11403         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11404                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11405                            tp->fw_len, tp->fw_needed);
11406                 release_firmware(tp->fw);
11407                 tp->fw = NULL;
11408                 return -EINVAL;
11409         }
11410
11411         /* We no longer need firmware; we have it. */
11412         tp->fw_needed = NULL;
11413         return 0;
11414 }
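/* A sketch of the header layout implied by the parsing above; the
 * authoritative struct tg3_firmware_hdr lives in tg3.h, and the field
 * names here are illustrative only:
 */
#if 0
struct tg3_firmware_hdr_sketch {
	__be32 version;		/* firmware version word */
	__be32 base_addr;	/* load address in NIC-local memory */
	__be32 len;		/* full image length, including BSS */
};				/* followed by tp->fw->size - TG3_FW_HDR_LEN
				 * bytes of payload, which 'len' must cover */
#endif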
11415
11416 static u32 tg3_irq_count(struct tg3 *tp)
11417 {
11418         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11419
11420         if (irq_cnt > 1) {
11421                 /* We want as many rx rings enabled as there are cpus.
11422                  * In multiqueue MSI-X mode, the first MSI-X vector
11423                  * only deals with link interrupts, etc, so we add
11424                  * one to the number of vectors we are requesting.
11425                  */
11426                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11427         }
11428
11429         return irq_cnt;
11430 }
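/* Worked example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at
 * max(4, 1) = 4; since that is > 1, one extra vector is reserved for the
 * link/status-only vector 0, so min(4 + 1, tp->irq_max) vectors are
 * requested.
 */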
11431
11432 static bool tg3_enable_msix(struct tg3 *tp)
11433 {
11434         int i, rc;
11435         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11436
11437         tp->txq_cnt = tp->txq_req;
11438         tp->rxq_cnt = tp->rxq_req;
11439         if (!tp->rxq_cnt)
11440                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11441         if (tp->rxq_cnt > tp->rxq_max)
11442                 tp->rxq_cnt = tp->rxq_max;
11443
11444         /* Disable multiple TX rings by default.  Simple round-robin hardware
11445          * scheduling of the TX rings can cause starvation of rings with
11446          * small packets when other rings have TSO or jumbo packets.
11447          */
11448         if (!tp->txq_req)
11449                 tp->txq_cnt = 1;
11450
11451         tp->irq_cnt = tg3_irq_count(tp);
11452
11453         for (i = 0; i < tp->irq_max; i++) {
11454                 msix_ent[i].entry  = i;
11455                 msix_ent[i].vector = 0;
11456         }
11457
11458         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11459         if (rc < 0) {
11460                 return false;
11461         } else if (rc < tp->irq_cnt) {
11462                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11463                               tp->irq_cnt, rc);
11464                 tp->irq_cnt = rc;
11465                 tp->rxq_cnt = max(rc - 1, 1);
11466                 if (tp->txq_cnt)
11467                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11468         }
11469
11470         for (i = 0; i < tp->irq_max; i++)
11471                 tp->napi[i].irq_vec = msix_ent[i].vector;
11472
11473         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11474                 pci_disable_msix(tp->pdev);
11475                 return false;
11476         }
11477
11478         if (tp->irq_cnt == 1)
11479                 return true;
11480
11481         tg3_flag_set(tp, ENABLE_RSS);
11482
11483         if (tp->txq_cnt > 1)
11484                 tg3_flag_set(tp, ENABLE_TSS);
11485
11486         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11487
11488         return true;
11489 }
11490
11491 static void tg3_ints_init(struct tg3 *tp)
11492 {
11493         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11494             !tg3_flag(tp, TAGGED_STATUS)) {
11495                 /* All MSI-supporting chips should support tagged
11496                  * status.  Warn and fall back to INTx if that is not the case.
11497                  */
11498                 netdev_warn(tp->dev,
11499                             "MSI without TAGGED_STATUS? Not using MSI\n");
11500                 goto defcfg;
11501         }
11502
11503         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11504                 tg3_flag_set(tp, USING_MSIX);
11505         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11506                 tg3_flag_set(tp, USING_MSI);
11507
11508         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11509                 u32 msi_mode = tr32(MSGINT_MODE);
11510                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11511                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11512                 if (!tg3_flag(tp, 1SHOT_MSI))
11513                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11514                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11515         }
11516 defcfg:
11517         if (!tg3_flag(tp, USING_MSIX)) {
11518                 tp->irq_cnt = 1;
11519                 tp->napi[0].irq_vec = tp->pdev->irq;
11520         }
11521
11522         if (tp->irq_cnt == 1) {
11523                 tp->txq_cnt = 1;
11524                 tp->rxq_cnt = 1;
11525                 netif_set_real_num_tx_queues(tp->dev, 1);
11526                 netif_set_real_num_rx_queues(tp->dev, 1);
11527         }
11528 }
11529
11530 static void tg3_ints_fini(struct tg3 *tp)
11531 {
11532         if (tg3_flag(tp, USING_MSIX))
11533                 pci_disable_msix(tp->pdev);
11534         else if (tg3_flag(tp, USING_MSI))
11535                 pci_disable_msi(tp->pdev);
11536         tg3_flag_clear(tp, USING_MSI);
11537         tg3_flag_clear(tp, USING_MSIX);
11538         tg3_flag_clear(tp, ENABLE_RSS);
11539         tg3_flag_clear(tp, ENABLE_TSS);
11540 }
11541
11542 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11543                      bool init)
11544 {
11545         struct net_device *dev = tp->dev;
11546         int i, err;
11547
11548         /*
11549          * Set up interrupts first so we know how
11550          * many NAPI resources to allocate.
11551          */
11552         tg3_ints_init(tp);
11553
11554         tg3_rss_check_indir_tbl(tp);
11555
11556         /* The placement of this call is tied
11557          * to the setup and use of Host TX descriptors.
11558          */
11559         err = tg3_alloc_consistent(tp);
11560         if (err)
11561                 goto out_ints_fini;
11562
11563         tg3_napi_init(tp);
11564
11565         tg3_napi_enable(tp);
11566
11567         for (i = 0; i < tp->irq_cnt; i++) {
11568                 err = tg3_request_irq(tp, i);
11569                 if (err) {
11570                         for (i--; i >= 0; i--) {
11571                                 struct tg3_napi *tnapi = &tp->napi[i];
11572
11573                                 free_irq(tnapi->irq_vec, tnapi);
11574                         }
11575                         goto out_napi_fini;
11576                 }
11577         }
11578
11579         tg3_full_lock(tp, 0);
11580
11581         if (init)
11582                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11583
11584         err = tg3_init_hw(tp, reset_phy);
11585         if (err) {
11586                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587                 tg3_free_rings(tp);
11588         }
11589
11590         tg3_full_unlock(tp);
11591
11592         if (err)
11593                 goto out_free_irq;
11594
11595         if (test_irq && tg3_flag(tp, USING_MSI)) {
11596                 err = tg3_test_msi(tp);
11597
11598                 if (err) {
11599                         tg3_full_lock(tp, 0);
11600                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11601                         tg3_free_rings(tp);
11602                         tg3_full_unlock(tp);
11603
11604                         goto out_napi_fini;
11605                 }
11606
11607                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11608                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11609
11610                         tw32(PCIE_TRANSACTION_CFG,
11611                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11612                 }
11613         }
11614
11615         tg3_phy_start(tp);
11616
11617         tg3_hwmon_open(tp);
11618
11619         tg3_full_lock(tp, 0);
11620
11621         tg3_timer_start(tp);
11622         tg3_flag_set(tp, INIT_COMPLETE);
11623         tg3_enable_ints(tp);
11624
11625         tg3_ptp_resume(tp);
11626
11627         tg3_full_unlock(tp);
11628
11629         netif_tx_start_all_queues(dev);
11630
11631         /*
11632          * Reset the loopback feature if it was turned on while the device
11633          * was down, to make sure that it is installed properly now.
11634          */
11635         if (dev->features & NETIF_F_LOOPBACK)
11636                 tg3_set_loopback(dev, dev->features);
11637
11638         return 0;
11639
11640 out_free_irq:
11641         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11642                 struct tg3_napi *tnapi = &tp->napi[i];
11643                 free_irq(tnapi->irq_vec, tnapi);
11644         }
11645
11646 out_napi_fini:
11647         tg3_napi_disable(tp);
11648         tg3_napi_fini(tp);
11649         tg3_free_consistent(tp);
11650
11651 out_ints_fini:
11652         tg3_ints_fini(tp);
11653
11654         return err;
11655 }
11656
11657 static void tg3_stop(struct tg3 *tp)
11658 {
11659         int i;
11660
11661         tg3_reset_task_cancel(tp);
11662         tg3_netif_stop(tp);
11663
11664         tg3_timer_stop(tp);
11665
11666         tg3_hwmon_close(tp);
11667
11668         tg3_phy_stop(tp);
11669
11670         tg3_full_lock(tp, 1);
11671
11672         tg3_disable_ints(tp);
11673
11674         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11675         tg3_free_rings(tp);
11676         tg3_flag_clear(tp, INIT_COMPLETE);
11677
11678         tg3_full_unlock(tp);
11679
11680         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11681                 struct tg3_napi *tnapi = &tp->napi[i];
11682                 free_irq(tnapi->irq_vec, tnapi);
11683         }
11684
11685         tg3_ints_fini(tp);
11686
11687         tg3_napi_fini(tp);
11688
11689         tg3_free_consistent(tp);
11690 }
11691
11692 static int tg3_open(struct net_device *dev)
11693 {
11694         struct tg3 *tp = netdev_priv(dev);
11695         int err;
11696
11697         if (tp->pcierr_recovery) {
11698                 netdev_err(dev, "Failed to open device. PCI error recovery "
11699                            "in progress\n");
11700                 return -EAGAIN;
11701         }
11702
11703         if (tp->fw_needed) {
11704                 err = tg3_request_firmware(tp);
11705                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11706                         if (err) {
11707                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11708                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11709                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11710                                 netdev_warn(tp->dev, "EEE capability restored\n");
11711                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11712                         }
11713                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11714                         if (err)
11715                                 return err;
11716                 } else if (err) {
11717                         netdev_warn(tp->dev, "TSO capability disabled\n");
11718                         tg3_flag_clear(tp, TSO_CAPABLE);
11719                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11720                         netdev_notice(tp->dev, "TSO capability restored\n");
11721                         tg3_flag_set(tp, TSO_CAPABLE);
11722                 }
11723         }
11724
11725         tg3_carrier_off(tp);
11726
11727         err = tg3_power_up(tp);
11728         if (err)
11729                 return err;
11730
11731         tg3_full_lock(tp, 0);
11732
11733         tg3_disable_ints(tp);
11734         tg3_flag_clear(tp, INIT_COMPLETE);
11735
11736         tg3_full_unlock(tp);
11737
11738         err = tg3_start(tp,
11739                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11740                         true, true);
11741         if (err) {
11742                 tg3_frob_aux_power(tp, false);
11743                 pci_set_power_state(tp->pdev, PCI_D3hot);
11744         }
11745
11746         return err;
11747 }
11748
11749 static int tg3_close(struct net_device *dev)
11750 {
11751         struct tg3 *tp = netdev_priv(dev);
11752
11753         if (tp->pcierr_recovery) {
11754                 netdev_err(dev, "Failed to close device. PCI error recovery "
11755                            "in progress\n");
11756                 return -EAGAIN;
11757         }
11758
11759         tg3_stop(tp);
11760
11761         if (pci_device_is_present(tp->pdev)) {
11762                 tg3_power_down_prepare(tp);
11763
11764                 tg3_carrier_off(tp);
11765         }
11766         return 0;
11767 }
11768
11769 static inline u64 get_stat64(tg3_stat64_t *val)
11770 {
11771        return ((u64)val->high << 32) | ((u64)val->low);
11772 }
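/* e.g. high = 0x00000001 and low = 0x00000002 combine to
 * 0x0000000100000002, reassembling the 64-bit counter the hardware
 * exports as two 32-bit halves.
 */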
11773
11774 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11775 {
11776         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11777
11778         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11779             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11780              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11781                 u32 val;
11782
11783                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11784                         tg3_writephy(tp, MII_TG3_TEST1,
11785                                      val | MII_TG3_TEST1_CRC_EN);
11786                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11787                 } else
11788                         val = 0;
11789
11790                 tp->phy_crc_errors += val;
11791
11792                 return tp->phy_crc_errors;
11793         }
11794
11795         return get_stat64(&hw_stats->rx_fcs_errors);
11796 }
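/* On 5700/5701 copper the CRC count comes from a PHY test register rather
 * than the MAC statistics block; the running sum kept in
 * tp->phy_crc_errors suggests the PHY counter does not accumulate across
 * reads, so each read is folded into the software total.
 */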
11797
11798 #define ESTAT_ADD(member) \
11799         estats->member =        old_estats->member + \
11800                                 get_stat64(&hw_stats->member)
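/* ESTAT_ADD(rx_octets), for example, expands to
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the snapshot saved before the last chip
 * reset plus the live hardware counter, keeping the ethtool statistics
 * monotonic across resets.
 */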
11801
11802 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11803 {
11804         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11805         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11806
11807         ESTAT_ADD(rx_octets);
11808         ESTAT_ADD(rx_fragments);
11809         ESTAT_ADD(rx_ucast_packets);
11810         ESTAT_ADD(rx_mcast_packets);
11811         ESTAT_ADD(rx_bcast_packets);
11812         ESTAT_ADD(rx_fcs_errors);
11813         ESTAT_ADD(rx_align_errors);
11814         ESTAT_ADD(rx_xon_pause_rcvd);
11815         ESTAT_ADD(rx_xoff_pause_rcvd);
11816         ESTAT_ADD(rx_mac_ctrl_rcvd);
11817         ESTAT_ADD(rx_xoff_entered);
11818         ESTAT_ADD(rx_frame_too_long_errors);
11819         ESTAT_ADD(rx_jabbers);
11820         ESTAT_ADD(rx_undersize_packets);
11821         ESTAT_ADD(rx_in_length_errors);
11822         ESTAT_ADD(rx_out_length_errors);
11823         ESTAT_ADD(rx_64_or_less_octet_packets);
11824         ESTAT_ADD(rx_65_to_127_octet_packets);
11825         ESTAT_ADD(rx_128_to_255_octet_packets);
11826         ESTAT_ADD(rx_256_to_511_octet_packets);
11827         ESTAT_ADD(rx_512_to_1023_octet_packets);
11828         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11829         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11830         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11831         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11832         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11833
11834         ESTAT_ADD(tx_octets);
11835         ESTAT_ADD(tx_collisions);
11836         ESTAT_ADD(tx_xon_sent);
11837         ESTAT_ADD(tx_xoff_sent);
11838         ESTAT_ADD(tx_flow_control);
11839         ESTAT_ADD(tx_mac_errors);
11840         ESTAT_ADD(tx_single_collisions);
11841         ESTAT_ADD(tx_mult_collisions);
11842         ESTAT_ADD(tx_deferred);
11843         ESTAT_ADD(tx_excessive_collisions);
11844         ESTAT_ADD(tx_late_collisions);
11845         ESTAT_ADD(tx_collide_2times);
11846         ESTAT_ADD(tx_collide_3times);
11847         ESTAT_ADD(tx_collide_4times);
11848         ESTAT_ADD(tx_collide_5times);
11849         ESTAT_ADD(tx_collide_6times);
11850         ESTAT_ADD(tx_collide_7times);
11851         ESTAT_ADD(tx_collide_8times);
11852         ESTAT_ADD(tx_collide_9times);
11853         ESTAT_ADD(tx_collide_10times);
11854         ESTAT_ADD(tx_collide_11times);
11855         ESTAT_ADD(tx_collide_12times);
11856         ESTAT_ADD(tx_collide_13times);
11857         ESTAT_ADD(tx_collide_14times);
11858         ESTAT_ADD(tx_collide_15times);
11859         ESTAT_ADD(tx_ucast_packets);
11860         ESTAT_ADD(tx_mcast_packets);
11861         ESTAT_ADD(tx_bcast_packets);
11862         ESTAT_ADD(tx_carrier_sense_errors);
11863         ESTAT_ADD(tx_discards);
11864         ESTAT_ADD(tx_errors);
11865
11866         ESTAT_ADD(dma_writeq_full);
11867         ESTAT_ADD(dma_write_prioq_full);
11868         ESTAT_ADD(rxbds_empty);
11869         ESTAT_ADD(rx_discards);
11870         ESTAT_ADD(rx_errors);
11871         ESTAT_ADD(rx_threshold_hit);
11872
11873         ESTAT_ADD(dma_readq_full);
11874         ESTAT_ADD(dma_read_prioq_full);
11875         ESTAT_ADD(tx_comp_queue_full);
11876
11877         ESTAT_ADD(ring_set_send_prod_index);
11878         ESTAT_ADD(ring_status_update);
11879         ESTAT_ADD(nic_irqs);
11880         ESTAT_ADD(nic_avoided_irqs);
11881         ESTAT_ADD(nic_tx_threshold_hit);
11882
11883         ESTAT_ADD(mbuf_lwm_thresh_hit);
11884 }
11885
11886 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11887 {
11888         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11889         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11890
11891         stats->rx_packets = old_stats->rx_packets +
11892                 get_stat64(&hw_stats->rx_ucast_packets) +
11893                 get_stat64(&hw_stats->rx_mcast_packets) +
11894                 get_stat64(&hw_stats->rx_bcast_packets);
11895
11896         stats->tx_packets = old_stats->tx_packets +
11897                 get_stat64(&hw_stats->tx_ucast_packets) +
11898                 get_stat64(&hw_stats->tx_mcast_packets) +
11899                 get_stat64(&hw_stats->tx_bcast_packets);
11900
11901         stats->rx_bytes = old_stats->rx_bytes +
11902                 get_stat64(&hw_stats->rx_octets);
11903         stats->tx_bytes = old_stats->tx_bytes +
11904                 get_stat64(&hw_stats->tx_octets);
11905
11906         stats->rx_errors = old_stats->rx_errors +
11907                 get_stat64(&hw_stats->rx_errors);
11908         stats->tx_errors = old_stats->tx_errors +
11909                 get_stat64(&hw_stats->tx_errors) +
11910                 get_stat64(&hw_stats->tx_mac_errors) +
11911                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11912                 get_stat64(&hw_stats->tx_discards);
11913
11914         stats->multicast = old_stats->multicast +
11915                 get_stat64(&hw_stats->rx_mcast_packets);
11916         stats->collisions = old_stats->collisions +
11917                 get_stat64(&hw_stats->tx_collisions);
11918
11919         stats->rx_length_errors = old_stats->rx_length_errors +
11920                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11921                 get_stat64(&hw_stats->rx_undersize_packets);
11922
11923         stats->rx_frame_errors = old_stats->rx_frame_errors +
11924                 get_stat64(&hw_stats->rx_align_errors);
11925         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11926                 get_stat64(&hw_stats->tx_discards);
11927         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11928                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11929
11930         stats->rx_crc_errors = old_stats->rx_crc_errors +
11931                 tg3_calc_crc_errors(tp);
11932
11933         stats->rx_missed_errors = old_stats->rx_missed_errors +
11934                 get_stat64(&hw_stats->rx_discards);
11935
11936         stats->rx_dropped = tp->rx_dropped;
11937         stats->tx_dropped = tp->tx_dropped;
11938 }
11939
11940 static int tg3_get_regs_len(struct net_device *dev)
11941 {
11942         return TG3_REG_BLK_SIZE;
11943 }
11944
11945 static void tg3_get_regs(struct net_device *dev,
11946                 struct ethtool_regs *regs, void *_p)
11947 {
11948         struct tg3 *tp = netdev_priv(dev);
11949
11950         regs->version = 0;
11951
11952         memset(_p, 0, TG3_REG_BLK_SIZE);
11953
11954         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11955                 return;
11956
11957         tg3_full_lock(tp, 0);
11958
11959         tg3_dump_legacy_regs(tp, (u32 *)_p);
11960
11961         tg3_full_unlock(tp);
11962 }
11963
11964 static int tg3_get_eeprom_len(struct net_device *dev)
11965 {
11966         struct tg3 *tp = netdev_priv(dev);
11967
11968         return tp->nvram_size;
11969 }
11970
11971 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11972 {
11973         struct tg3 *tp = netdev_priv(dev);
11974         int ret, cpmu_restore = 0;
11975         u8  *pd;
11976         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11977         __be32 val;
11978
11979         if (tg3_flag(tp, NO_NVRAM))
11980                 return -EINVAL;
11981
11982         offset = eeprom->offset;
11983         len = eeprom->len;
11984         eeprom->len = 0;
11985
11986         eeprom->magic = TG3_EEPROM_MAGIC;
11987
11988         /* Override clock, link aware and link idle modes */
11989         if (tg3_flag(tp, CPMU_PRESENT)) {
11990                 cpmu_val = tr32(TG3_CPMU_CTRL);
11991                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11992                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11993                         tw32(TG3_CPMU_CTRL, cpmu_val &
11994                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11995                                              CPMU_CTRL_LINK_IDLE_MODE));
11996                         cpmu_restore = 1;
11997                 }
11998         }
11999         tg3_override_clk(tp);
12000
12001         if (offset & 3) {
12002                 /* adjustments to start on required 4 byte boundary */
12003                 b_offset = offset & 3;
12004                 b_count = 4 - b_offset;
12005                 if (b_count > len) {
12006                         /* i.e. offset=1 len=2 */
12007                         b_count = len;
12008                 }
12009                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12010                 if (ret)
12011                         goto eeprom_done;
12012                 memcpy(data, ((char *)&val) + b_offset, b_count);
12013                 len -= b_count;
12014                 offset += b_count;
12015                 eeprom->len += b_count;
12016         }
12017
12018         /* read bytes up to the last 4 byte boundary */
12019         pd = &data[eeprom->len];
12020         for (i = 0; i < (len - (len & 3)); i += 4) {
12021                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12022                 if (ret) {
12023                         if (i)
12024                                 i -= 4;
12025                         eeprom->len += i;
12026                         goto eeprom_done;
12027                 }
12028                 memcpy(pd + i, &val, 4);
12029                 if (need_resched()) {
12030                         if (signal_pending(current)) {
12031                                 eeprom->len += i;
12032                                 ret = -EINTR;
12033                                 goto eeprom_done;
12034                         }
12035                         cond_resched();
12036                 }
12037         }
12038         eeprom->len += i;
12039
12040         if (len & 3) {
12041                 /* read last bytes not ending on 4 byte boundary */
12042                 pd = &data[eeprom->len];
12043                 b_count = len & 3;
12044                 b_offset = offset + len - b_count;
12045                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12046                 if (ret)
12047                         goto eeprom_done;
12048                 memcpy(pd, &val, b_count);
12049                 eeprom->len += b_count;
12050         }
12051         ret = 0;
12052
12053 eeprom_done:
12054         /* Restore clock, link aware and link idle modes */
12055         tg3_restore_clk(tp);
12056         if (cpmu_restore)
12057                 tw32(TG3_CPMU_CTRL, cpmu_val);
12058
12059         return ret;
12060 }
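/* Worked example of the alignment handling above: a request with
 * offset = 1 and len = 2 gives b_offset = 1 and b_count = 2 (clamped from
 * 4 - 1 = 3 down to len), so one aligned word is read from offset 0 and
 * bytes 1..2 of it are copied out; len drops to 0, so both the aligned
 * loop and the tail read are skipped.
 */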
12061
12062 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12063 {
12064         struct tg3 *tp = netdev_priv(dev);
12065         int ret;
12066         u32 offset, len, b_offset, odd_len;
12067         u8 *buf;
12068         __be32 start = 0, end;
12069
12070         if (tg3_flag(tp, NO_NVRAM) ||
12071             eeprom->magic != TG3_EEPROM_MAGIC)
12072                 return -EINVAL;
12073
12074         offset = eeprom->offset;
12075         len = eeprom->len;
12076
12077         if ((b_offset = (offset & 3))) {
12078                 /* adjustments to start on required 4 byte boundary */
12079                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12080                 if (ret)
12081                         return ret;
12082                 len += b_offset;
12083                 offset &= ~3;
12084                 if (len < 4)
12085                         len = 4;
12086         }
12087
12088         odd_len = 0;
12089         if (len & 3) {
12090                 /* adjustments to end on required 4 byte boundary */
12091                 odd_len = 1;
12092                 len = (len + 3) & ~3;
12093                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12094                 if (ret)
12095                         return ret;
12096         }
12097
12098         buf = data;
12099         if (b_offset || odd_len) {
12100                 buf = kmalloc(len, GFP_KERNEL);
12101                 if (!buf)
12102                         return -ENOMEM;
12103                 if (b_offset)
12104                         memcpy(buf, &start, 4);
12105                 if (odd_len)
12106                         memcpy(buf+len-4, &end, 4);
12107                 memcpy(buf + b_offset, data, eeprom->len);
12108         }
12109
12110         ret = tg3_nvram_write_block(tp, offset, len, buf);
12111
12112         if (buf != data)
12113                 kfree(buf);
12114
12115         return ret;
12116 }
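/* Worked example of the read-modify-write above: writing 3 bytes at
 * offset 2 gives b_offset = 2, so the word at offset 0 is read into
 * 'start' and len grows to 5; 5 & 3 != 0, so len is rounded up to 8 and
 * the word at offset 4 is read into 'end'.  The bounce buffer then holds
 * bytes 0-1 from 'start', the 3 user bytes at 2-4, and bytes 5-7 from
 * 'end', and the full 8-byte block is written back to NVRAM.
 */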
12117
12118 static int tg3_get_link_ksettings(struct net_device *dev,
12119                                   struct ethtool_link_ksettings *cmd)
12120 {
12121         struct tg3 *tp = netdev_priv(dev);
12122         u32 supported, advertising;
12123
12124         if (tg3_flag(tp, USE_PHYLIB)) {
12125                 struct phy_device *phydev;
12126                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12127                         return -EAGAIN;
12128                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12129                 phy_ethtool_ksettings_get(phydev, cmd);
12130
12131                 return 0;
12132         }
12133
12134         supported = (SUPPORTED_Autoneg);
12135
12136         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12137                 supported |= (SUPPORTED_1000baseT_Half |
12138                               SUPPORTED_1000baseT_Full);
12139
12140         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12141                 supported |= (SUPPORTED_100baseT_Half |
12142                               SUPPORTED_100baseT_Full |
12143                               SUPPORTED_10baseT_Half |
12144                               SUPPORTED_10baseT_Full |
12145                               SUPPORTED_TP);
12146                 cmd->base.port = PORT_TP;
12147         } else {
12148                 supported |= SUPPORTED_FIBRE;
12149                 cmd->base.port = PORT_FIBRE;
12150         }
12151         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12152                                                 supported);
12153
12154         advertising = tp->link_config.advertising;
12155         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12156                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12157                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12158                                 advertising |= ADVERTISED_Pause;
12159                         } else {
12160                                 advertising |= ADVERTISED_Pause |
12161                                         ADVERTISED_Asym_Pause;
12162                         }
12163                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12164                         advertising |= ADVERTISED_Asym_Pause;
12165                 }
12166         }
12167         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12168                                                 advertising);
12169
12170         if (netif_running(dev) && tp->link_up) {
12171                 cmd->base.speed = tp->link_config.active_speed;
12172                 cmd->base.duplex = tp->link_config.active_duplex;
12173                 ethtool_convert_legacy_u32_to_link_mode(
12174                         cmd->link_modes.lp_advertising,
12175                         tp->link_config.rmt_adv);
12176
12177                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12178                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12179                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12180                         else
12181                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12182                 }
12183         } else {
12184                 cmd->base.speed = SPEED_UNKNOWN;
12185                 cmd->base.duplex = DUPLEX_UNKNOWN;
12186                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12187         }
12188         cmd->base.phy_address = tp->phy_addr;
12189         cmd->base.autoneg = tp->link_config.autoneg;
12190         return 0;
12191 }
12192
12193 static int tg3_set_link_ksettings(struct net_device *dev,
12194                                   const struct ethtool_link_ksettings *cmd)
12195 {
12196         struct tg3 *tp = netdev_priv(dev);
12197         u32 speed = cmd->base.speed;
12198         u32 advertising;
12199
12200         if (tg3_flag(tp, USE_PHYLIB)) {
12201                 struct phy_device *phydev;
12202                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12203                         return -EAGAIN;
12204                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12205                 return phy_ethtool_ksettings_set(phydev, cmd);
12206         }
12207
12208         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12209             cmd->base.autoneg != AUTONEG_DISABLE)
12210                 return -EINVAL;
12211
12212         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12213             cmd->base.duplex != DUPLEX_FULL &&
12214             cmd->base.duplex != DUPLEX_HALF)
12215                 return -EINVAL;
12216
12217         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12218                                                 cmd->link_modes.advertising);
12219
12220         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12221                 u32 mask = ADVERTISED_Autoneg |
12222                            ADVERTISED_Pause |
12223                            ADVERTISED_Asym_Pause;
12224
12225                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12226                         mask |= ADVERTISED_1000baseT_Half |
12227                                 ADVERTISED_1000baseT_Full;
12228
12229                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12230                         mask |= ADVERTISED_100baseT_Half |
12231                                 ADVERTISED_100baseT_Full |
12232                                 ADVERTISED_10baseT_Half |
12233                                 ADVERTISED_10baseT_Full |
12234                                 ADVERTISED_TP;
12235                 else
12236                         mask |= ADVERTISED_FIBRE;
12237
12238                 if (advertising & ~mask)
12239                         return -EINVAL;
12240
12241                 mask &= (ADVERTISED_1000baseT_Half |
12242                          ADVERTISED_1000baseT_Full |
12243                          ADVERTISED_100baseT_Half |
12244                          ADVERTISED_100baseT_Full |
12245                          ADVERTISED_10baseT_Half |
12246                          ADVERTISED_10baseT_Full);
12247
12248                 advertising &= mask;
12249         } else {
12250                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12251                         if (speed != SPEED_1000)
12252                                 return -EINVAL;
12253
12254                         if (cmd->base.duplex != DUPLEX_FULL)
12255                                 return -EINVAL;
12256                 } else {
12257                         if (speed != SPEED_100 &&
12258                             speed != SPEED_10)
12259                                 return -EINVAL;
12260                 }
12261         }
12262
12263         tg3_full_lock(tp, 0);
12264
12265         tp->link_config.autoneg = cmd->base.autoneg;
12266         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12267                 tp->link_config.advertising = (advertising |
12268                                               ADVERTISED_Autoneg);
12269                 tp->link_config.speed = SPEED_UNKNOWN;
12270                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12271         } else {
12272                 tp->link_config.advertising = 0;
12273                 tp->link_config.speed = speed;
12274                 tp->link_config.duplex = cmd->base.duplex;
12275         }
12276
12277         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12278
12279         tg3_warn_mgmt_link_flap(tp);
12280
12281         if (netif_running(dev))
12282                 tg3_setup_phy(tp, true);
12283
12284         tg3_full_unlock(tp);
12285
12286         return 0;
12287 }
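/* Example of the advertising-mask check above: requesting
 * ADVERTISED_1000baseT_Full on a device with TG3_PHYFLG_10_100_ONLY set
 * leaves that bit outside 'mask', so 'advertising & ~mask' is non-zero
 * and the request is rejected with -EINVAL before any state is touched.
 */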
12288
12289 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12290 {
12291         struct tg3 *tp = netdev_priv(dev);
12292
12293         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12294         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12295         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12296 }
12297
12298 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12299 {
12300         struct tg3 *tp = netdev_priv(dev);
12301
12302         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12303                 wol->supported = WAKE_MAGIC;
12304         else
12305                 wol->supported = 0;
12306         wol->wolopts = 0;
12307         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12308                 wol->wolopts = WAKE_MAGIC;
12309         memset(&wol->sopass, 0, sizeof(wol->sopass));
12310 }
12311
12312 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12313 {
12314         struct tg3 *tp = netdev_priv(dev);
12315         struct device *dp = &tp->pdev->dev;
12316
12317         if (wol->wolopts & ~WAKE_MAGIC)
12318                 return -EINVAL;
12319         if ((wol->wolopts & WAKE_MAGIC) &&
12320             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12321                 return -EINVAL;
12322
12323         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12324
12325         if (device_may_wakeup(dp))
12326                 tg3_flag_set(tp, WOL_ENABLE);
12327         else
12328                 tg3_flag_clear(tp, WOL_ENABLE);
12329
12330         return 0;
12331 }
12332
12333 static u32 tg3_get_msglevel(struct net_device *dev)
12334 {
12335         struct tg3 *tp = netdev_priv(dev);
12336         return tp->msg_enable;
12337 }
12338
12339 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12340 {
12341         struct tg3 *tp = netdev_priv(dev);
12342         tp->msg_enable = value;
12343 }
12344
12345 static int tg3_nway_reset(struct net_device *dev)
12346 {
12347         struct tg3 *tp = netdev_priv(dev);
12348         int r;
12349
12350         if (!netif_running(dev))
12351                 return -EAGAIN;
12352
12353         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12354                 return -EINVAL;
12355
12356         tg3_warn_mgmt_link_flap(tp);
12357
12358         if (tg3_flag(tp, USE_PHYLIB)) {
12359                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12360                         return -EAGAIN;
12361                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12362         } else {
12363                 u32 bmcr;
12364
12365                 spin_lock_bh(&tp->lock);
12366                 r = -EINVAL;
12367                 tg3_readphy(tp, MII_BMCR, &bmcr);
12368                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12369                     ((bmcr & BMCR_ANENABLE) ||
12370                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12371                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12372                                                    BMCR_ANENABLE);
12373                         r = 0;
12374                 }
12375                 spin_unlock_bh(&tp->lock);
12376         }
12377
12378         return r;
12379 }
12380
12381 static void tg3_get_ringparam(struct net_device *dev,
12382                               struct ethtool_ringparam *ering,
12383                               struct kernel_ethtool_ringparam *kernel_ering,
12384                               struct netlink_ext_ack *extack)
12385 {
12386         struct tg3 *tp = netdev_priv(dev);
12387
12388         ering->rx_max_pending = tp->rx_std_ring_mask;
12389         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12390                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12391         else
12392                 ering->rx_jumbo_max_pending = 0;
12393
12394         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12395
12396         ering->rx_pending = tp->rx_pending;
12397         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12398                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12399         else
12400                 ering->rx_jumbo_pending = 0;
12401
12402         ering->tx_pending = tp->napi[0].tx_pending;
12403 }
12404
12405 static int tg3_set_ringparam(struct net_device *dev,
12406                              struct ethtool_ringparam *ering,
12407                              struct kernel_ethtool_ringparam *kernel_ering,
12408                              struct netlink_ext_ack *extack)
12409 {
12410         struct tg3 *tp = netdev_priv(dev);
12411         int i, irq_sync = 0, err = 0;
12412         bool reset_phy = false;
12413
12414         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12415             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12416             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12417             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12418             (tg3_flag(tp, TSO_BUG) &&
12419              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12420                 return -EINVAL;
12421
12422         if (netif_running(dev)) {
12423                 tg3_phy_stop(tp);
12424                 tg3_netif_stop(tp);
12425                 irq_sync = 1;
12426         }
12427
12428         tg3_full_lock(tp, irq_sync);
12429
12430         tp->rx_pending = ering->rx_pending;
12431
12432         if (tg3_flag(tp, MAX_RXPEND_64) &&
12433             tp->rx_pending > 63)
12434                 tp->rx_pending = 63;
12435
12436         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12437                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12438
12439         for (i = 0; i < tp->irq_max; i++)
12440                 tp->napi[i].tx_pending = ering->tx_pending;
12441
12442         if (netif_running(dev)) {
12443                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12444                 /* Reset the PHY to avoid a PHY lockup */
12445                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12446                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12447                     tg3_asic_rev(tp) == ASIC_REV_5720)
12448                         reset_phy = true;
12449
12450                 err = tg3_restart_hw(tp, reset_phy);
12451                 if (!err)
12452                         tg3_netif_start(tp);
12453         }
12454
12455         tg3_full_unlock(tp);
12456
12457         if (irq_sync && !err)
12458                 tg3_phy_start(tp);
12459
12460         return err;
12461 }
12462
12463 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12464 {
12465         struct tg3 *tp = netdev_priv(dev);
12466
12467         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12468
12469         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12470                 epause->rx_pause = 1;
12471         else
12472                 epause->rx_pause = 0;
12473
12474         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12475                 epause->tx_pause = 1;
12476         else
12477                 epause->tx_pause = 0;
12478 }
12479
12480 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12481 {
12482         struct tg3 *tp = netdev_priv(dev);
12483         int err = 0;
12484         bool reset_phy = false;
12485
12486         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12487                 tg3_warn_mgmt_link_flap(tp);
12488
12489         if (tg3_flag(tp, USE_PHYLIB)) {
12490                 struct phy_device *phydev;
12491
12492                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12493
12494                 if (!phy_validate_pause(phydev, epause))
12495                         return -EINVAL;
12496
12497                 tp->link_config.flowctrl = 0;
12498                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12499                 if (epause->rx_pause) {
12500                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12501
12502                         if (epause->tx_pause) {
12503                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12504                         }
12505                 } else if (epause->tx_pause) {
12506                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12507                 }
12508
12509                 if (epause->autoneg)
12510                         tg3_flag_set(tp, PAUSE_AUTONEG);
12511                 else
12512                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12513
12514                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12515                         if (phydev->autoneg) {
12516                                 /* phy_set_asym_pause() will
12517                                  * renegotiate the link to inform our
12518                                  * link partner of our flow control
12519                                  * settings, even if the flow control
12520                                  * is forced.  Let tg3_adjust_link()
12521                                  * do the final flow control setup.
12522                                  */
12523                                 return 0;
12524                         }
12525
12526                         if (!epause->autoneg)
12527                                 tg3_setup_flow_control(tp, 0, 0);
12528                 }
12529         } else {
12530                 int irq_sync = 0;
12531
12532                 if (netif_running(dev)) {
12533                         tg3_netif_stop(tp);
12534                         irq_sync = 1;
12535                 }
12536
12537                 tg3_full_lock(tp, irq_sync);
12538
12539                 if (epause->autoneg)
12540                         tg3_flag_set(tp, PAUSE_AUTONEG);
12541                 else
12542                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12543                 if (epause->rx_pause)
12544                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12545                 else
12546                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12547                 if (epause->tx_pause)
12548                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12549                 else
12550                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12551
12552                 if (netif_running(dev)) {
12553                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12554                         /* Reset the PHY to avoid a PHY lockup */
12555                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12556                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12557                             tg3_asic_rev(tp) == ASIC_REV_5720)
12558                                 reset_phy = true;
12559
12560                         err = tg3_restart_hw(tp, reset_phy);
12561                         if (!err)
12562                                 tg3_netif_start(tp);
12563                 }
12564
12565                 tg3_full_unlock(tp);
12566         }
12567
12568         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12569
12570         return err;
12571 }
12572
12573 static int tg3_get_sset_count(struct net_device *dev, int sset)
12574 {
12575         switch (sset) {
12576         case ETH_SS_TEST:
12577                 return TG3_NUM_TEST;
12578         case ETH_SS_STATS:
12579                 return TG3_NUM_STATS;
12580         default:
12581                 return -EOPNOTSUPP;
12582         }
12583 }
12584
12585 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12586                          u32 *rules __always_unused)
12587 {
12588         struct tg3 *tp = netdev_priv(dev);
12589
12590         if (!tg3_flag(tp, SUPPORT_MSIX))
12591                 return -EOPNOTSUPP;
12592
12593         switch (info->cmd) {
12594         case ETHTOOL_GRXRINGS:
12595                 if (netif_running(tp->dev))
12596                         info->data = tp->rxq_cnt;
12597                 else {
12598                         info->data = num_online_cpus();
12599                         if (info->data > TG3_RSS_MAX_NUM_QS)
12600                                 info->data = TG3_RSS_MAX_NUM_QS;
12601                 }
12602
12603                 return 0;
12604
12605         default:
12606                 return -EOPNOTSUPP;
12607         }
12608 }
12609
12610 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12611 {
12612         u32 size = 0;
12613         struct tg3 *tp = netdev_priv(dev);
12614
12615         if (tg3_flag(tp, SUPPORT_MSIX))
12616                 size = TG3_RSS_INDIR_TBL_SIZE;
12617
12618         return size;
12619 }
12620
12621 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12622 {
12623         struct tg3 *tp = netdev_priv(dev);
12624         int i;
12625
12626         if (hfunc)
12627                 *hfunc = ETH_RSS_HASH_TOP;
12628         if (!indir)
12629                 return 0;
12630
12631         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12632                 indir[i] = tp->rss_ind_tbl[i];
12633
12634         return 0;
12635 }
12636
12637 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12638                         const u8 hfunc)
12639 {
12640         struct tg3 *tp = netdev_priv(dev);
12641         size_t i;
12642
12643         /* The hash key and hash function are not configurable: reject any
12644          * request that sets a key or selects a function other than Toeplitz.
12645          */
12646         if (key ||
12647             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12648                 return -EOPNOTSUPP;
12649
12650         if (!indir)
12651                 return 0;
12652
12653         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12654                 tp->rss_ind_tbl[i] = indir[i];
12655
12656         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12657                 return 0;
12658
12659         /* It is legal to write the indirection
12660          * table while the device is running.
12661          */
12662         tg3_full_lock(tp, 0);
12663         tg3_rss_write_indir_tbl(tp);
12664         tg3_full_unlock(tp);
12665
12666         return 0;
12667 }
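/* The indirection table maps RSS hash buckets to RX queue indices; with
 * tp->rxq_cnt == 4, a typical round-robin table would read 0, 1, 2, 3,
 * 0, 1, ... across its TG3_RSS_INDIR_TBL_SIZE entries, spreading flows
 * evenly.  Because only the table (not the key or hash function) is
 * written, updating it on a running device is safe, as noted above.
 */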
12668
12669 static void tg3_get_channels(struct net_device *dev,
12670                              struct ethtool_channels *channel)
12671 {
12672         struct tg3 *tp = netdev_priv(dev);
12673         u32 deflt_qs = netif_get_num_default_rss_queues();
12674
12675         channel->max_rx = tp->rxq_max;
12676         channel->max_tx = tp->txq_max;
12677
12678         if (netif_running(dev)) {
12679                 channel->rx_count = tp->rxq_cnt;
12680                 channel->tx_count = tp->txq_cnt;
12681         } else {
12682                 if (tp->rxq_req)
12683                         channel->rx_count = tp->rxq_req;
12684                 else
12685                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12686
12687                 if (tp->txq_req)
12688                         channel->tx_count = tp->txq_req;
12689                 else
12690                         channel->tx_count = min(deflt_qs, tp->txq_max);
12691         }
12692 }
12693
12694 static int tg3_set_channels(struct net_device *dev,
12695                             struct ethtool_channels *channel)
12696 {
12697         struct tg3 *tp = netdev_priv(dev);
12698
12699         if (!tg3_flag(tp, SUPPORT_MSIX))
12700                 return -EOPNOTSUPP;
12701
12702         if (channel->rx_count > tp->rxq_max ||
12703             channel->tx_count > tp->txq_max)
12704                 return -EINVAL;
12705
12706         tp->rxq_req = channel->rx_count;
12707         tp->txq_req = channel->tx_count;
12708
12709         if (!netif_running(dev))
12710                 return 0;
12711
12712         tg3_stop(tp);
12713
12714         tg3_carrier_off(tp);
12715
12716         tg3_start(tp, true, false, false);
12717
12718         return 0;
12719 }
12720
12721 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12722 {
12723         switch (stringset) {
12724         case ETH_SS_STATS:
12725                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12726                 break;
12727         case ETH_SS_TEST:
12728                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12729                 break;
12730         default:
12731                 WARN_ON(1);     /* we need a WARN() */
12732                 break;
12733         }
12734 }
12735
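/* ethtool LED identify handler ("ethtool -p").  Returning 1 for
 * ETHTOOL_ID_ACTIVE asks the core to call back once per second with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF, which override the link and traffic
 * LED controls; ETHTOOL_ID_INACTIVE restores the saved tp->led_ctrl.
 */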
12736 static int tg3_set_phys_id(struct net_device *dev,
12737                             enum ethtool_phys_id_state state)
12738 {
12739         struct tg3 *tp = netdev_priv(dev);
12740
12741         switch (state) {
12742         case ETHTOOL_ID_ACTIVE:
12743                 return 1;       /* cycle on/off once per second */
12744
12745         case ETHTOOL_ID_ON:
12746                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12747                      LED_CTRL_1000MBPS_ON |
12748                      LED_CTRL_100MBPS_ON |
12749                      LED_CTRL_10MBPS_ON |
12750                      LED_CTRL_TRAFFIC_OVERRIDE |
12751                      LED_CTRL_TRAFFIC_BLINK |
12752                      LED_CTRL_TRAFFIC_LED);
12753                 break;
12754
12755         case ETHTOOL_ID_OFF:
12756                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12757                      LED_CTRL_TRAFFIC_OVERRIDE);
12758                 break;
12759
12760         case ETHTOOL_ID_INACTIVE:
12761                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12762                 break;
12763         }
12764
12765         return 0;
12766 }
12767
12768 static void tg3_get_ethtool_stats(struct net_device *dev,
12769                                    struct ethtool_stats *estats, u64 *tmp_stats)
12770 {
12771         struct tg3 *tp = netdev_priv(dev);
12772
12773         if (tp->hw_stats)
12774                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12775         else
12776                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12777 }
12778
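/* Read the adapter's VPD block.  EEPROM-backed parts are searched for
 * an extended-VPD directory entry first, falling back to the fixed
 * VPD offset; other parts defer to the PCI core's VPD reader.
 * Returns a buffer the caller must kfree(), or NULL on failure.
 */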
12779 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12780 {
12781         int i;
12782         __be32 *buf;
12783         u32 offset = 0, len = 0;
12784         u32 magic, val;
12785
12786         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12787                 return NULL;
12788
12789         if (magic == TG3_EEPROM_MAGIC) {
12790                 for (offset = TG3_NVM_DIR_START;
12791                      offset < TG3_NVM_DIR_END;
12792                      offset += TG3_NVM_DIRENT_SIZE) {
12793                         if (tg3_nvram_read(tp, offset, &val))
12794                                 return NULL;
12795
12796                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12797                             TG3_NVM_DIRTYPE_EXTVPD)
12798                                 break;
12799                 }
12800
12801                 if (offset != TG3_NVM_DIR_END) {
12802                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12803                         if (tg3_nvram_read(tp, offset + 4, &offset))
12804                                 return NULL;
12805
12806                         offset = tg3_nvram_logical_addr(tp, offset);
12807                 }
12808
12809                 if (!offset || !len) {
12810                         offset = TG3_NVM_VPD_OFF;
12811                         len = TG3_NVM_VPD_LEN;
12812                 }
12813
12814                 buf = kmalloc(len, GFP_KERNEL);
12815                 if (!buf)
12816                         return NULL;
12817
12818                 for (i = 0; i < len; i += 4) {
12819                         /* The data is in little-endian format in NVRAM.
12820                          * Use the big-endian read routines to preserve
12821                          * the byte order as it exists in NVRAM.
12822                          */
12823                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12824                                 goto error;
12825                 }
12826                 *vpdlen = len;
12827         } else {
12828                 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12829                 if (IS_ERR(buf))
12830                         return NULL;
12831         }
12832
12833         return buf;
12834
12835 error:
12836         kfree(buf);
12837         return NULL;
12838 }
12839
12840 #define NVRAM_TEST_SIZE 0x100
12841 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12842 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12843 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12844 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12845 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12846 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12847 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12848 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12849
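/* NVRAM self-test: read the image back and verify the checksum scheme
 * that matches its magic number - a zero byte-sum for firmware selfboot
 * images, per-byte odd parity for the hardware selfboot format, and
 * CRC32 plus a VPD checksum for the standard EEPROM layout.
 */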
12850 static int tg3_test_nvram(struct tg3 *tp)
12851 {
12852         u32 csum, magic;
12853         __be32 *buf;
12854         int i, j, k, err = 0, size;
12855         unsigned int len;
12856
12857         if (tg3_flag(tp, NO_NVRAM))
12858                 return 0;
12859
12860         if (tg3_nvram_read(tp, 0, &magic) != 0)
12861                 return -EIO;
12862
12863         if (magic == TG3_EEPROM_MAGIC)
12864                 size = NVRAM_TEST_SIZE;
12865         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12866                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12867                     TG3_EEPROM_SB_FORMAT_1) {
12868                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12869                         case TG3_EEPROM_SB_REVISION_0:
12870                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12871                                 break;
12872                         case TG3_EEPROM_SB_REVISION_2:
12873                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12874                                 break;
12875                         case TG3_EEPROM_SB_REVISION_3:
12876                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12877                                 break;
12878                         case TG3_EEPROM_SB_REVISION_4:
12879                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12880                                 break;
12881                         case TG3_EEPROM_SB_REVISION_5:
12882                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12883                                 break;
12884                         case TG3_EEPROM_SB_REVISION_6:
12885                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12886                                 break;
12887                         default:
12888                                 return -EIO;
12889                         }
12890                 } else
12891                         return 0;
12892         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12893                 size = NVRAM_SELFBOOT_HW_SIZE;
12894         else
12895                 return -EIO;
12896
12897         buf = kmalloc(size, GFP_KERNEL);
12898         if (buf == NULL)
12899                 return -ENOMEM;
12900
12901         err = -EIO;
12902         for (i = 0, j = 0; i < size; i += 4, j++) {
12903                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12904                 if (err)
12905                         break;
12906         }
12907         if (i < size)
12908                 goto out;
12909
12910         /* Selfboot format */
12911         magic = be32_to_cpu(buf[0]);
12912         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12913             TG3_EEPROM_MAGIC_FW) {
12914                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12915
12916                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12917                     TG3_EEPROM_SB_REVISION_2) {
12918                         /* For rev 2, the csum doesn't include the MBA. */
12919                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12920                                 csum8 += buf8[i];
12921                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12922                                 csum8 += buf8[i];
12923                 } else {
12924                         for (i = 0; i < size; i++)
12925                                 csum8 += buf8[i];
12926                 }
12927
12928                 if (csum8 == 0) {
12929                         err = 0;
12930                         goto out;
12931                 }
12932
12933                 err = -EIO;
12934                 goto out;
12935         }
12936
12937         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12938             TG3_EEPROM_MAGIC_HW) {
12939                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12940                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12941                 u8 *buf8 = (u8 *) buf;
12942
12943                 /* Separate the parity bits and the data bytes.  */
12944                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12945                         if ((i == 0) || (i == 8)) {
12946                                 int l;
12947                                 u8 msk;
12948
12949                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12950                                         parity[k++] = buf8[i] & msk;
12951                                 i++;
12952                         } else if (i == 16) {
12953                                 int l;
12954                                 u8 msk;
12955
12956                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12957                                         parity[k++] = buf8[i] & msk;
12958                                 i++;
12959
12960                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12961                                         parity[k++] = buf8[i] & msk;
12962                                 i++;
12963                         }
12964                         data[j++] = buf8[i];
12965                 }
12966
12967                 err = -EIO;
12968                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12969                         u8 hw8 = hweight8(data[i]);
12970
12971                         if ((hw8 & 0x1) && parity[i])
12972                                 goto out;
12973                         else if (!(hw8 & 0x1) && !parity[i])
12974                                 goto out;
12975                 }
12976                 err = 0;
12977                 goto out;
12978         }
12979
12980         err = -EIO;
12981
12982         /* Bootstrap checksum at offset 0x10 */
12983         csum = calc_crc((unsigned char *) buf, 0x10);
12984         if (csum != le32_to_cpu(buf[0x10/4]))
12985                 goto out;
12986
12987         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12988         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12989         if (csum != le32_to_cpu(buf[0xfc/4]))
12990                 goto out;
12991
12992         kfree(buf);
12993
12994         buf = tg3_vpd_readblock(tp, &len);
12995         if (!buf)
12996                 return -ENOMEM;
12997
12998         err = pci_vpd_check_csum(buf, len);
12999         /* go on if no checksum found */
13000         if (err == 1)
13001                 err = 0;
13002 out:
13003         kfree(buf);
13004         return err;
13005 }
13006
13007 #define TG3_SERDES_TIMEOUT_SEC  2
13008 #define TG3_COPPER_TIMEOUT_SEC  6
13009
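/* Link self-test: poll tp->link_up once per second.  SerDes parts get
 * a short timeout; copper PHYs are given longer, since autonegotiation
 * can take several seconds.
 */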
13010 static int tg3_test_link(struct tg3 *tp)
13011 {
13012         int i, max;
13013
13014         if (!netif_running(tp->dev))
13015                 return -ENODEV;
13016
13017         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13018                 max = TG3_SERDES_TIMEOUT_SEC;
13019         else
13020                 max = TG3_COPPER_TIMEOUT_SEC;
13021
13022         for (i = 0; i < max; i++) {
13023                 if (tp->link_up)
13024                         return 0;
13025
13026                 if (msleep_interruptible(1000))
13027                         break;
13028         }
13029
13030         return -EIO;
13031 }
13032
13033 /* Only test the commonly used registers */
13034 static int tg3_test_registers(struct tg3 *tp)
13035 {
13036         int i, is_5705, is_5750;
13037         u32 offset, read_mask, write_mask, val, save_val, read_val;
13038         static struct {
13039                 u16 offset;
13040                 u16 flags;
13041 #define TG3_FL_5705     0x1
13042 #define TG3_FL_NOT_5705 0x2
13043 #define TG3_FL_NOT_5788 0x4
13044 #define TG3_FL_NOT_5750 0x8
13045                 u32 read_mask;
13046                 u32 write_mask;
13047         } reg_tbl[] = {
13048                 /* MAC Control Registers */
13049                 { MAC_MODE, TG3_FL_NOT_5705,
13050                         0x00000000, 0x00ef6f8c },
13051                 { MAC_MODE, TG3_FL_5705,
13052                         0x00000000, 0x01ef6b8c },
13053                 { MAC_STATUS, TG3_FL_NOT_5705,
13054                         0x03800107, 0x00000000 },
13055                 { MAC_STATUS, TG3_FL_5705,
13056                         0x03800100, 0x00000000 },
13057                 { MAC_ADDR_0_HIGH, 0x0000,
13058                         0x00000000, 0x0000ffff },
13059                 { MAC_ADDR_0_LOW, 0x0000,
13060                         0x00000000, 0xffffffff },
13061                 { MAC_RX_MTU_SIZE, 0x0000,
13062                         0x00000000, 0x0000ffff },
13063                 { MAC_TX_MODE, 0x0000,
13064                         0x00000000, 0x00000070 },
13065                 { MAC_TX_LENGTHS, 0x0000,
13066                         0x00000000, 0x00003fff },
13067                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13068                         0x00000000, 0x000007fc },
13069                 { MAC_RX_MODE, TG3_FL_5705,
13070                         0x00000000, 0x000007dc },
13071                 { MAC_HASH_REG_0, 0x0000,
13072                         0x00000000, 0xffffffff },
13073                 { MAC_HASH_REG_1, 0x0000,
13074                         0x00000000, 0xffffffff },
13075                 { MAC_HASH_REG_2, 0x0000,
13076                         0x00000000, 0xffffffff },
13077                 { MAC_HASH_REG_3, 0x0000,
13078                         0x00000000, 0xffffffff },
13079
13080                 /* Receive Data and Receive BD Initiator Control Registers. */
13081                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13082                         0x00000000, 0xffffffff },
13083                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13084                         0x00000000, 0xffffffff },
13085                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13086                         0x00000000, 0x00000003 },
13087                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13088                         0x00000000, 0xffffffff },
13089                 { RCVDBDI_STD_BD+0, 0x0000,
13090                         0x00000000, 0xffffffff },
13091                 { RCVDBDI_STD_BD+4, 0x0000,
13092                         0x00000000, 0xffffffff },
13093                 { RCVDBDI_STD_BD+8, 0x0000,
13094                         0x00000000, 0xffff0002 },
13095                 { RCVDBDI_STD_BD+0xc, 0x0000,
13096                         0x00000000, 0xffffffff },
13097
13098                 /* Receive BD Initiator Control Registers. */
13099                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13100                         0x00000000, 0xffffffff },
13101                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13102                         0x00000000, 0x000003ff },
13103                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13104                         0x00000000, 0xffffffff },
13105
13106                 /* Host Coalescing Control Registers. */
13107                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13108                         0x00000000, 0x00000004 },
13109                 { HOSTCC_MODE, TG3_FL_5705,
13110                         0x00000000, 0x000000f6 },
13111                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13112                         0x00000000, 0xffffffff },
13113                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13114                         0x00000000, 0x000003ff },
13115                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13116                         0x00000000, 0xffffffff },
13117                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13118                         0x00000000, 0x000003ff },
13119                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13120                         0x00000000, 0xffffffff },
13121                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13122                         0x00000000, 0x000000ff },
13123                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13124                         0x00000000, 0xffffffff },
13125                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13126                         0x00000000, 0x000000ff },
13127                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13128                         0x00000000, 0xffffffff },
13129                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13130                         0x00000000, 0xffffffff },
13131                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13132                         0x00000000, 0xffffffff },
13133                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13134                         0x00000000, 0x000000ff },
13135                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13138                         0x00000000, 0x000000ff },
13139                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13140                         0x00000000, 0xffffffff },
13141                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13142                         0x00000000, 0xffffffff },
13143                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13146                         0x00000000, 0xffffffff },
13147                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13150                         0xffffffff, 0x00000000 },
13151                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13152                         0xffffffff, 0x00000000 },
13153
13154                 /* Buffer Manager Control Registers. */
13155                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13156                         0x00000000, 0x007fff80 },
13157                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13158                         0x00000000, 0x007fffff },
13159                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13160                         0x00000000, 0x0000003f },
13161                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13162                         0x00000000, 0x000001ff },
13163                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13164                         0x00000000, 0x000001ff },
13165                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13166                         0xffffffff, 0x00000000 },
13167                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13168                         0xffffffff, 0x00000000 },
13169
13170                 /* Mailbox Registers */
13171                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13172                         0x00000000, 0x000001ff },
13173                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13174                         0x00000000, 0x000001ff },
13175                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13176                         0x00000000, 0x000007ff },
13177                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13178                         0x00000000, 0x000001ff },
13179
13180                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13181         };
13182
13183         is_5705 = is_5750 = 0;
13184         if (tg3_flag(tp, 5705_PLUS)) {
13185                 is_5705 = 1;
13186                 if (tg3_flag(tp, 5750_PLUS))
13187                         is_5750 = 1;
13188         }
13189
13190         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13191                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13192                         continue;
13193
13194                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13195                         continue;
13196
13197                 if (tg3_flag(tp, IS_5788) &&
13198                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13199                         continue;
13200
13201                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13202                         continue;
13203
13204                 offset = (u32) reg_tbl[i].offset;
13205                 read_mask = reg_tbl[i].read_mask;
13206                 write_mask = reg_tbl[i].write_mask;
13207
13208                 /* Save the original register content */
13209                 save_val = tr32(offset);
13210
13211                 /* Determine the read-only value. */
13212                 read_val = save_val & read_mask;
13213
13214                 /* Write zero to the register, then make sure the read-only bits
13215                  * are not changed and the read/write bits are all zeros.
13216                  */
13217                 tw32(offset, 0);
13218
13219                 val = tr32(offset);
13220
13221                 /* Test the read-only and read/write bits. */
13222                 if (((val & read_mask) != read_val) || (val & write_mask))
13223                         goto out;
13224
13225                 /* Write ones to all the bits defined by read_mask and
13226                  * write_mask, then make sure the read-only bits are not
13227                  * changed and the read/write bits are all ones.
13228                  */
13229                 tw32(offset, read_mask | write_mask);
13230
13231                 val = tr32(offset);
13232
13233                 /* Test the read-only bits. */
13234                 if ((val & read_mask) != read_val)
13235                         goto out;
13236
13237                 /* Test the read/write bits. */
13238                 if ((val & write_mask) != write_mask)
13239                         goto out;
13240
13241                 tw32(offset, save_val);
13242         }
13243
13244         return 0;
13245
13246 out:
13247         if (netif_msg_hw(tp))
13248                 netdev_err(tp->dev,
13249                            "Register test failed at offset %x\n", offset);
13250         tw32(offset, save_val);
13251         return -EIO;
13252 }
13253
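/* Walk [offset, offset + len) of internal NIC memory, writing each
 * test pattern word by word and reading it back, failing on the first
 * mismatch.
 */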
13254 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13255 {
13256         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13257         int i;
13258         u32 j;
13259
13260         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13261                 for (j = 0; j < len; j += 4) {
13262                         u32 val;
13263
13264                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13265                         tg3_read_mem(tp, offset + j, &val);
13266                         if (val != test_pattern[i])
13267                                 return -EIO;
13268                 }
13269         }
13270         return 0;
13271 }
13272
13273 static int tg3_test_memory(struct tg3 *tp)
13274 {
13275         static struct mem_entry {
13276                 u32 offset;
13277                 u32 len;
13278         } mem_tbl_570x[] = {
13279                 { 0x00000000, 0x00b50},
13280                 { 0x00002000, 0x1c000},
13281                 { 0xffffffff, 0x00000}
13282         }, mem_tbl_5705[] = {
13283                 { 0x00000100, 0x0000c},
13284                 { 0x00000200, 0x00008},
13285                 { 0x00004000, 0x00800},
13286                 { 0x00006000, 0x01000},
13287                 { 0x00008000, 0x02000},
13288                 { 0x00010000, 0x0e000},
13289                 { 0xffffffff, 0x00000}
13290         }, mem_tbl_5755[] = {
13291                 { 0x00000200, 0x00008},
13292                 { 0x00004000, 0x00800},
13293                 { 0x00006000, 0x00800},
13294                 { 0x00008000, 0x02000},
13295                 { 0x00010000, 0x0c000},
13296                 { 0xffffffff, 0x00000}
13297         }, mem_tbl_5906[] = {
13298                 { 0x00000200, 0x00008},
13299                 { 0x00004000, 0x00400},
13300                 { 0x00006000, 0x00400},
13301                 { 0x00008000, 0x01000},
13302                 { 0x00010000, 0x01000},
13303                 { 0xffffffff, 0x00000}
13304         }, mem_tbl_5717[] = {
13305                 { 0x00000200, 0x00008},
13306                 { 0x00010000, 0x0a000},
13307                 { 0x00020000, 0x13c00},
13308                 { 0xffffffff, 0x00000}
13309         }, mem_tbl_57765[] = {
13310                 { 0x00000200, 0x00008},
13311                 { 0x00004000, 0x00800},
13312                 { 0x00006000, 0x09800},
13313                 { 0x00010000, 0x0a000},
13314                 { 0xffffffff, 0x00000}
13315         };
13316         struct mem_entry *mem_tbl;
13317         int err = 0;
13318         int i;
13319
13320         if (tg3_flag(tp, 5717_PLUS))
13321                 mem_tbl = mem_tbl_5717;
13322         else if (tg3_flag(tp, 57765_CLASS) ||
13323                  tg3_asic_rev(tp) == ASIC_REV_5762)
13324                 mem_tbl = mem_tbl_57765;
13325         else if (tg3_flag(tp, 5755_PLUS))
13326                 mem_tbl = mem_tbl_5755;
13327         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13328                 mem_tbl = mem_tbl_5906;
13329         else if (tg3_flag(tp, 5705_PLUS))
13330                 mem_tbl = mem_tbl_5705;
13331         else
13332                 mem_tbl = mem_tbl_570x;
13333
13334         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13335                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13336                 if (err)
13337                         break;
13338         }
13339
13340         return err;
13341 }
13342
13343 #define TG3_TSO_MSS             500
13344
13345 #define TG3_TSO_IP_HDR_LEN      20
13346 #define TG3_TSO_TCP_HDR_LEN     20
13347 #define TG3_TSO_TCP_OPT_LEN     12
13348
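/* Canned frame used by the TSO loopback test: an IPv4 ethertype
 * followed by 20-byte IP and TCP headers plus 12 bytes of TCP
 * timestamp options.  The IP total length (and, for hardware TSO, the
 * TCP checksum field) is patched at run time by tg3_run_loopback().
 */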
13349 static const u8 tg3_tso_header[] = {
13350 0x08, 0x00,
13351 0x45, 0x00, 0x00, 0x00,
13352 0x00, 0x00, 0x40, 0x00,
13353 0x40, 0x06, 0x00, 0x00,
13354 0x0a, 0x00, 0x00, 0x01,
13355 0x0a, 0x00, 0x00, 0x02,
13356 0x0d, 0x00, 0xe0, 0x00,
13357 0x00, 0x00, 0x01, 0x00,
13358 0x00, 0x00, 0x02, 0x00,
13359 0x80, 0x10, 0x10, 0x00,
13360 0x14, 0x09, 0x00, 0x00,
13361 0x01, 0x01, 0x08, 0x0a,
13362 0x11, 0x11, 0x11, 0x11,
13363 0x11, 0x11, 0x11, 0x11,
13364 };
13365
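/* Send one hand-built frame (or a fake TSO burst) through whatever
 * loopback mode the caller has configured, and verify that the
 * expected number of packets arrives on the receive return ring with
 * the payload byte pattern intact.  Returns 0 on success or a -errno.
 */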
13366 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13367 {
13368         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13369         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13370         u32 budget;
13371         struct sk_buff *skb;
13372         u8 *tx_data, *rx_data;
13373         dma_addr_t map;
13374         int num_pkts, tx_len, rx_len, i, err;
13375         struct tg3_rx_buffer_desc *desc;
13376         struct tg3_napi *tnapi, *rnapi;
13377         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13378
13379         tnapi = &tp->napi[0];
13380         rnapi = &tp->napi[0];
13381         if (tp->irq_cnt > 1) {
13382                 if (tg3_flag(tp, ENABLE_RSS))
13383                         rnapi = &tp->napi[1];
13384                 if (tg3_flag(tp, ENABLE_TSS))
13385                         tnapi = &tp->napi[1];
13386         }
13387         coal_now = tnapi->coal_now | rnapi->coal_now;
13388
13389         err = -EIO;
13390
13391         tx_len = pktsz;
13392         skb = netdev_alloc_skb(tp->dev, tx_len);
13393         if (!skb)
13394                 return -ENOMEM;
13395
13396         tx_data = skb_put(skb, tx_len);
13397         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13398         memset(tx_data + ETH_ALEN, 0x0, 8);
13399
13400         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13401
13402         if (tso_loopback) {
13403                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13404
13405                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13406                               TG3_TSO_TCP_OPT_LEN;
13407
13408                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13409                        sizeof(tg3_tso_header));
13410                 mss = TG3_TSO_MSS;
13411
13412                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13413                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13414
13415                 /* Set the total length field in the IP header */
13416                 iph->tot_len = htons((u16)(mss + hdr_len));
13417
13418                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13419                               TXD_FLAG_CPU_POST_DMA);
13420
13421                 if (tg3_flag(tp, HW_TSO_1) ||
13422                     tg3_flag(tp, HW_TSO_2) ||
13423                     tg3_flag(tp, HW_TSO_3)) {
13424                         struct tcphdr *th;
13425                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13426                         th = (struct tcphdr *)&tx_data[val];
13427                         th->check = 0;
13428                 } else
13429                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13430
13431                 if (tg3_flag(tp, HW_TSO_3)) {
13432                         mss |= (hdr_len & 0xc) << 12;
13433                         if (hdr_len & 0x10)
13434                                 base_flags |= 0x00000010;
13435                         base_flags |= (hdr_len & 0x3e0) << 5;
13436                 } else if (tg3_flag(tp, HW_TSO_2))
13437                         mss |= hdr_len << 9;
13438                 else if (tg3_flag(tp, HW_TSO_1) ||
13439                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13440                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13441                 } else {
13442                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13443                 }
13444
13445                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13446         } else {
13447                 num_pkts = 1;
13448                 data_off = ETH_HLEN;
13449
13450                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13451                     tx_len > VLAN_ETH_FRAME_LEN)
13452                         base_flags |= TXD_FLAG_JMB_PKT;
13453         }
13454
13455         for (i = data_off; i < tx_len; i++)
13456                 tx_data[i] = (u8) (i & 0xff);
13457
13458         map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13459         if (dma_mapping_error(&tp->pdev->dev, map)) {
13460                 dev_kfree_skb(skb);
13461                 return -EIO;
13462         }
13463
13464         val = tnapi->tx_prod;
13465         tnapi->tx_buffers[val].skb = skb;
13466         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13467
13468         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13469                rnapi->coal_now);
13470
13471         udelay(10);
13472
13473         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13474
13475         budget = tg3_tx_avail(tnapi);
13476         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13477                             base_flags | TXD_FLAG_END, mss, 0)) {
13478                 tnapi->tx_buffers[val].skb = NULL;
13479                 dev_kfree_skb(skb);
13480                 return -EIO;
13481         }
13482
13483         tnapi->tx_prod++;
13484
13485         /* Sync BD data before updating mailbox */
13486         wmb();
13487
13488         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13489         tr32_mailbox(tnapi->prodmbox);
13490
13491         udelay(10);
13492
13493         /* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
13494         for (i = 0; i < 35; i++) {
13495                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13496                        coal_now);
13497
13498                 udelay(10);
13499
13500                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13501                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13502                 if ((tx_idx == tnapi->tx_prod) &&
13503                     (rx_idx == (rx_start_idx + num_pkts)))
13504                         break;
13505         }
13506
13507         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13508         dev_kfree_skb(skb);
13509
13510         if (tx_idx != tnapi->tx_prod)
13511                 goto out;
13512
13513         if (rx_idx != rx_start_idx + num_pkts)
13514                 goto out;
13515
13516         val = data_off;
13517         while (rx_idx != rx_start_idx) {
13518                 desc = &rnapi->rx_rcb[rx_start_idx++];
13519                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13520                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13521
13522                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13523                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13524                         goto out;
13525
13526                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13527                          - ETH_FCS_LEN;
13528
13529                 if (!tso_loopback) {
13530                         if (rx_len != tx_len)
13531                                 goto out;
13532
13533                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13534                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13535                                         goto out;
13536                         } else {
13537                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13538                                         goto out;
13539                         }
13540                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13541                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13542                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13543                         goto out;
13544                 }
13545
13546                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13547                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13548                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13549                                              mapping);
13550                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13551                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13552                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13553                                              mapping);
13554                 } else
13555                         goto out;
13556
13557                 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13558                                         DMA_FROM_DEVICE);
13559
13560                 rx_data += TG3_RX_OFFSET(tp);
13561                 for (i = data_off; i < rx_len; i++, val++) {
13562                         if (*(rx_data + i) != (u8) (val & 0xff))
13563                                 goto out;
13564                 }
13565         }
13566
13567         err = 0;
13568
13569         /* tg3_free_rings will unmap and free the rx_data */
13570 out:
13571         return err;
13572 }
13573
13574 #define TG3_STD_LOOPBACK_FAILED         1
13575 #define TG3_JMB_LOOPBACK_FAILED         2
13576 #define TG3_TSO_LOOPBACK_FAILED         4
13577 #define TG3_LOOPBACK_FAILED \
13578         (TG3_STD_LOOPBACK_FAILED | \
13579          TG3_JMB_LOOPBACK_FAILED | \
13580          TG3_TSO_LOOPBACK_FAILED)
13581
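/* Drive the individual loopback runs: MAC loopback (where the errata
 * and CPMU checks below allow it), internal PHY loopback, and
 * optionally external loopback, each with standard, TSO and jumbo
 * frame sizes as the configuration permits.  Failure bits are ORed
 * into the corresponding data[] slots.
 */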
13582 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13583 {
13584         int err = -EIO;
13585         u32 eee_cap;
13586         u32 jmb_pkt_sz = 9000;
13587
13588         if (tp->dma_limit)
13589                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13590
13591         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13592         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13593
13594         if (!netif_running(tp->dev)) {
13595                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13596                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13597                 if (do_extlpbk)
13598                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13599                 goto done;
13600         }
13601
13602         err = tg3_reset_hw(tp, true);
13603         if (err) {
13604                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606                 if (do_extlpbk)
13607                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608                 goto done;
13609         }
13610
13611         if (tg3_flag(tp, ENABLE_RSS)) {
13612                 int i;
13613
13614                 /* Reroute all rx packets to the 1st queue */
13615                 for (i = MAC_RSS_INDIR_TBL_0;
13616                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13617                         tw32(i, 0x0);
13618         }
13619
13620         /* HW errata - mac loopback fails in some cases on 5780.
13621          * Normal traffic and PHY loopback are not affected by
13622          * errata.  Also, the MAC loopback test is deprecated for
13623          * all newer ASIC revisions.
13624          */
13625         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13626             !tg3_flag(tp, CPMU_PRESENT)) {
13627                 tg3_mac_loopback(tp, true);
13628
13629                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13630                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13631
13632                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13633                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13634                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13635
13636                 tg3_mac_loopback(tp, false);
13637         }
13638
13639         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13640             !tg3_flag(tp, USE_PHYLIB)) {
13641                 int i;
13642
13643                 tg3_phy_lpbk_set(tp, 0, false);
13644
13645                 /* Wait for link */
13646                 for (i = 0; i < 100; i++) {
13647                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13648                                 break;
13649                         mdelay(1);
13650                 }
13651
13652                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13653                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13654                 if (tg3_flag(tp, TSO_CAPABLE) &&
13655                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13656                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13657                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13658                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13659                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13660
13661                 if (do_extlpbk) {
13662                         tg3_phy_lpbk_set(tp, 0, true);
13663
13664                         /* All link indications report up, but the hardware
13665                          * isn't really ready for about 20 msec.  Double it
13666                          * to be sure.
13667                          */
13668                         mdelay(40);
13669
13670                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13671                                 data[TG3_EXT_LOOPB_TEST] |=
13672                                                         TG3_STD_LOOPBACK_FAILED;
13673                         if (tg3_flag(tp, TSO_CAPABLE) &&
13674                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13675                                 data[TG3_EXT_LOOPB_TEST] |=
13676                                                         TG3_TSO_LOOPBACK_FAILED;
13677                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13678                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13679                                 data[TG3_EXT_LOOPB_TEST] |=
13680                                                         TG3_JMB_LOOPBACK_FAILED;
13681                 }
13682
13683                 /* Re-enable gphy autopowerdown. */
13684                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13685                         tg3_phy_toggle_apd(tp, true);
13686         }
13687
13688         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13689                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13690
13691 done:
13692         tp->phy_flags |= eee_cap;
13693
13694         return err;
13695 }
13696
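/* ethtool self-test entry point ("ethtool -t").  The NVRAM and link
 * tests run online; offline testing halts the chip, exercises the
 * register, memory, loopback and interrupt tests, and then restarts
 * the hardware if the interface was running.
 */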
13697 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13698                           u64 *data)
13699 {
13700         struct tg3 *tp = netdev_priv(dev);
13701         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13702
13703         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13704                 if (tg3_power_up(tp)) {
13705                         etest->flags |= ETH_TEST_FL_FAILED;
13706                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13707                         return;
13708                 }
13709                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13710         }
13711
13712         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13713
13714         if (tg3_test_nvram(tp) != 0) {
13715                 etest->flags |= ETH_TEST_FL_FAILED;
13716                 data[TG3_NVRAM_TEST] = 1;
13717         }
13718         if (!doextlpbk && tg3_test_link(tp)) {
13719                 etest->flags |= ETH_TEST_FL_FAILED;
13720                 data[TG3_LINK_TEST] = 1;
13721         }
13722         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13723                 int err, err2 = 0, irq_sync = 0;
13724
13725                 if (netif_running(dev)) {
13726                         tg3_phy_stop(tp);
13727                         tg3_netif_stop(tp);
13728                         irq_sync = 1;
13729                 }
13730
13731                 tg3_full_lock(tp, irq_sync);
13732                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13733                 err = tg3_nvram_lock(tp);
13734                 tg3_halt_cpu(tp, RX_CPU_BASE);
13735                 if (!tg3_flag(tp, 5705_PLUS))
13736                         tg3_halt_cpu(tp, TX_CPU_BASE);
13737                 if (!err)
13738                         tg3_nvram_unlock(tp);
13739
13740                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13741                         tg3_phy_reset(tp);
13742
13743                 if (tg3_test_registers(tp) != 0) {
13744                         etest->flags |= ETH_TEST_FL_FAILED;
13745                         data[TG3_REGISTER_TEST] = 1;
13746                 }
13747
13748                 if (tg3_test_memory(tp) != 0) {
13749                         etest->flags |= ETH_TEST_FL_FAILED;
13750                         data[TG3_MEMORY_TEST] = 1;
13751                 }
13752
13753                 if (doextlpbk)
13754                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13755
13756                 if (tg3_test_loopback(tp, data, doextlpbk))
13757                         etest->flags |= ETH_TEST_FL_FAILED;
13758
13759                 tg3_full_unlock(tp);
13760
13761                 if (tg3_test_interrupt(tp) != 0) {
13762                         etest->flags |= ETH_TEST_FL_FAILED;
13763                         data[TG3_INTERRUPT_TEST] = 1;
13764                 }
13765
13766                 tg3_full_lock(tp, 0);
13767
13768                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13769                 if (netif_running(dev)) {
13770                         tg3_flag_set(tp, INIT_COMPLETE);
13771                         err2 = tg3_restart_hw(tp, true);
13772                         if (!err2)
13773                                 tg3_netif_start(tp);
13774                 }
13775
13776                 tg3_full_unlock(tp);
13777
13778                 if (irq_sync && !err2)
13779                         tg3_phy_start(tp);
13780         }
13781         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13782                 tg3_power_down_prepare(tp);
13783
13784 }
13785
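/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config onto the
 * chip's RX PTP filter control value and the TX timestamp enable flag.
 * Filters outside the PTP v1/v2 cases below are rejected with -ERANGE,
 * and the accepted config is copied back to user space.
 */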
13786 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13787 {
13788         struct tg3 *tp = netdev_priv(dev);
13789         struct hwtstamp_config stmpconf;
13790
13791         if (!tg3_flag(tp, PTP_CAPABLE))
13792                 return -EOPNOTSUPP;
13793
13794         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13795                 return -EFAULT;
13796
13797         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13798             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13799                 return -ERANGE;
13800
13801         switch (stmpconf.rx_filter) {
13802         case HWTSTAMP_FILTER_NONE:
13803                 tp->rxptpctl = 0;
13804                 break;
13805         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13806                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13807                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13808                 break;
13809         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13810                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13811                                TG3_RX_PTP_CTL_SYNC_EVNT;
13812                 break;
13813         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13814                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815                                TG3_RX_PTP_CTL_DELAY_REQ;
13816                 break;
13817         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13818                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13819                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13820                 break;
13821         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13822                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13823                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13824                 break;
13825         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13826                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13827                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13828                 break;
13829         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13830                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13831                                TG3_RX_PTP_CTL_SYNC_EVNT;
13832                 break;
13833         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13834                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13835                                TG3_RX_PTP_CTL_SYNC_EVNT;
13836                 break;
13837         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13838                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13839                                TG3_RX_PTP_CTL_SYNC_EVNT;
13840                 break;
13841         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13842                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13843                                TG3_RX_PTP_CTL_DELAY_REQ;
13844                 break;
13845         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13846                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13847                                TG3_RX_PTP_CTL_DELAY_REQ;
13848                 break;
13849         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13850                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13851                                TG3_RX_PTP_CTL_DELAY_REQ;
13852                 break;
13853         default:
13854                 return -ERANGE;
13855         }
13856
13857         if (netif_running(dev) && tp->rxptpctl)
13858                 tw32(TG3_RX_PTP_CTL,
13859                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13860
13861         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13862                 tg3_flag_set(tp, TX_TSTAMP_EN);
13863         else
13864                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13865
13866         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13867                 -EFAULT : 0;
13868 }
13869
13870 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13871 {
13872         struct tg3 *tp = netdev_priv(dev);
13873         struct hwtstamp_config stmpconf;
13874
13875         if (!tg3_flag(tp, PTP_CAPABLE))
13876                 return -EOPNOTSUPP;
13877
13878         stmpconf.flags = 0;
13879         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13880                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13881
13882         switch (tp->rxptpctl) {
13883         case 0:
13884                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13885                 break;
13886         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13887                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13888                 break;
13889         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13890                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13891                 break;
13892         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13893                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13894                 break;
13895         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13896                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13897                 break;
13898         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13899                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13900                 break;
13901         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13902                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13903                 break;
13904         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13905                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13906                 break;
13907         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13908                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13909                 break;
13910         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13911                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13912                 break;
13913         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13914                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13915                 break;
13916         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13917                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13918                 break;
13919         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13920                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13921                 break;
13922         default:
13923                 WARN_ON_ONCE(1);
13924                 return -ERANGE;
13925         }
13926
13927         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13928                 -EFAULT : 0;
13929 }
13930
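/* MII and timestamping ioctl handler.  When phylib manages the PHY,
 * MII ioctls are forwarded to the attached PHY driver; otherwise the
 * PHY registers are accessed directly under tp->lock.  SIOCSHWTSTAMP
 * and SIOCGHWTSTAMP are dispatched to the helpers above.
 */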
13931 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13932 {
13933         struct mii_ioctl_data *data = if_mii(ifr);
13934         struct tg3 *tp = netdev_priv(dev);
13935         int err;
13936
13937         if (tg3_flag(tp, USE_PHYLIB)) {
13938                 struct phy_device *phydev;
13939                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13940                         return -EAGAIN;
13941                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13942                 return phy_mii_ioctl(phydev, ifr, cmd);
13943         }
13944
13945         switch (cmd) {
13946         case SIOCGMIIPHY:
13947                 data->phy_id = tp->phy_addr;
13948
13949                 fallthrough;
13950         case SIOCGMIIREG: {
13951                 u32 mii_regval;
13952
13953                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13954                         break;                  /* We have no PHY */
13955
13956                 if (!netif_running(dev))
13957                         return -EAGAIN;
13958
13959                 spin_lock_bh(&tp->lock);
13960                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13961                                     data->reg_num & 0x1f, &mii_regval);
13962                 spin_unlock_bh(&tp->lock);
13963
13964                 data->val_out = mii_regval;
13965
13966                 return err;
13967         }
13968
13969         case SIOCSMIIREG:
13970                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13971                         break;                  /* We have no PHY */
13972
13973                 if (!netif_running(dev))
13974                         return -EAGAIN;
13975
13976                 spin_lock_bh(&tp->lock);
13977                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13978                                      data->reg_num & 0x1f, data->val_in);
13979                 spin_unlock_bh(&tp->lock);
13980
13981                 return err;
13982
13983         case SIOCSHWTSTAMP:
13984                 return tg3_hwtstamp_set(dev, ifr);
13985
13986         case SIOCGHWTSTAMP:
13987                 return tg3_hwtstamp_get(dev, ifr);
13988
13989         default:
13990                 /* do nothing */
13991                 break;
13992         }
13993         return -EOPNOTSUPP;
13994 }
13995
13996 static int tg3_get_coalesce(struct net_device *dev,
13997                             struct ethtool_coalesce *ec,
13998                             struct kernel_ethtool_coalesce *kernel_coal,
13999                             struct netlink_ext_ack *extack)
14000 {
14001         struct tg3 *tp = netdev_priv(dev);
14002
14003         memcpy(ec, &tp->coal, sizeof(*ec));
14004         return 0;
14005 }
14006
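/* Validate and apply interrupt coalescing parameters ("ethtool -C").
 * On 5705 and later parts the per-IRQ tick and statistics-block
 * coalescing limits stay at zero, so nonzero requests for those
 * fields are rejected with -EINVAL.
 */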
14007 static int tg3_set_coalesce(struct net_device *dev,
14008                             struct ethtool_coalesce *ec,
14009                             struct kernel_ethtool_coalesce *kernel_coal,
14010                             struct netlink_ext_ack *extack)
14011 {
14012         struct tg3 *tp = netdev_priv(dev);
14013         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14014         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14015
14016         if (!tg3_flag(tp, 5705_PLUS)) {
14017                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14018                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14019                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14020                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14021         }
14022
14023         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14024             (!ec->rx_coalesce_usecs) ||
14025             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14026             (!ec->tx_coalesce_usecs) ||
14027             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14028             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14029             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14030             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14031             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14032             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14033             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14034             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14035                 return -EINVAL;
14036
14037         /* Only copy relevant parameters, ignore all others. */
14038         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14039         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14040         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14041         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14042         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14043         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14044         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14045         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14046         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14047
14048         if (netif_running(dev)) {
14049                 tg3_full_lock(tp, 0);
14050                 __tg3_set_coalesce(tp, &tp->coal);
14051                 tg3_full_unlock(tp);
14052         }
14053         return 0;
14054 }
14055
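/* ethtool EEE set handler.  The advertised link modes cannot be
 * changed here, and the Tx LPI timer is bounded by the chip's
 * link-idle timer maximum; an accepted configuration is applied
 * immediately (with a PHY reset) if the interface is up.
 */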
14056 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14057 {
14058         struct tg3 *tp = netdev_priv(dev);
14059
14060         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14061                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14062                 return -EOPNOTSUPP;
14063         }
14064
14065         if (edata->advertised != tp->eee.advertised) {
14066                 netdev_warn(tp->dev,
14067                             "Direct manipulation of EEE advertisement is not supported\n");
14068                 return -EINVAL;
14069         }
14070
14071         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14072                 netdev_warn(tp->dev,
14073                             "Maximum supported Tx LPI timer is %#x\n",
14074                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14075                 return -EINVAL;
14076         }
14077
14078         tp->eee = *edata;
14079
14080         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14081         tg3_warn_mgmt_link_flap(tp);
14082
14083         if (netif_running(tp->dev)) {
14084                 tg3_full_lock(tp, 0);
14085                 tg3_setup_eee(tp);
14086                 tg3_phy_reset(tp);
14087                 tg3_full_unlock(tp);
14088         }
14089
14090         return 0;
14091 }
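
/* Illustrative userspace usage (device name hypothetical): the
 * tx-lpi-timer value passed through ethtool is the one bounded by
 * TG3_CPMU_DBTMR1_LNKIDLE_MAX above, e.g.:
 *
 *   ethtool --set-eee eth0 eee on tx-lpi on tx-lpi-timer 5000
 *
 * Direct changes to the advertised EEE link modes are rejected with
 * -EINVAL, as the handler above shows.
 */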
14092
14093 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14094 {
14095         struct tg3 *tp = netdev_priv(dev);
14096
14097         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14098                 netdev_warn(tp->dev,
14099                             "Board does not support EEE!\n");
14100                 return -EOPNOTSUPP;
14101         }
14102
14103         *edata = tp->eee;
14104         return 0;
14105 }
14106
14107 static const struct ethtool_ops tg3_ethtool_ops = {
14108         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14109                                      ETHTOOL_COALESCE_MAX_FRAMES |
14110                                      ETHTOOL_COALESCE_USECS_IRQ |
14111                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14112                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14113         .get_drvinfo            = tg3_get_drvinfo,
14114         .get_regs_len           = tg3_get_regs_len,
14115         .get_regs               = tg3_get_regs,
14116         .get_wol                = tg3_get_wol,
14117         .set_wol                = tg3_set_wol,
14118         .get_msglevel           = tg3_get_msglevel,
14119         .set_msglevel           = tg3_set_msglevel,
14120         .nway_reset             = tg3_nway_reset,
14121         .get_link               = ethtool_op_get_link,
14122         .get_eeprom_len         = tg3_get_eeprom_len,
14123         .get_eeprom             = tg3_get_eeprom,
14124         .set_eeprom             = tg3_set_eeprom,
14125         .get_ringparam          = tg3_get_ringparam,
14126         .set_ringparam          = tg3_set_ringparam,
14127         .get_pauseparam         = tg3_get_pauseparam,
14128         .set_pauseparam         = tg3_set_pauseparam,
14129         .self_test              = tg3_self_test,
14130         .get_strings            = tg3_get_strings,
14131         .set_phys_id            = tg3_set_phys_id,
14132         .get_ethtool_stats      = tg3_get_ethtool_stats,
14133         .get_coalesce           = tg3_get_coalesce,
14134         .set_coalesce           = tg3_set_coalesce,
14135         .get_sset_count         = tg3_get_sset_count,
14136         .get_rxnfc              = tg3_get_rxnfc,
14137         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14138         .get_rxfh               = tg3_get_rxfh,
14139         .set_rxfh               = tg3_set_rxfh,
14140         .get_channels           = tg3_get_channels,
14141         .set_channels           = tg3_set_channels,
14142         .get_ts_info            = tg3_get_ts_info,
14143         .get_eee                = tg3_get_eee,
14144         .set_eee                = tg3_set_eee,
14145         .get_link_ksettings     = tg3_get_link_ksettings,
14146         .set_link_ksettings     = tg3_set_link_ksettings,
14147 };
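
/* Note: the supported_coalesce_params mask above lets the ethtool core
 * reject any coalescing field this driver does not handle before
 * tg3_set_coalesce() is ever called, so the handler only has to
 * range-check the fields it actually consumes.
 */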
14148
14149 static void tg3_get_stats64(struct net_device *dev,
14150                             struct rtnl_link_stats64 *stats)
14151 {
14152         struct tg3 *tp = netdev_priv(dev);
14153
14154         spin_lock_bh(&tp->lock);
14155         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14156                 *stats = tp->net_stats_prev;
14157                 spin_unlock_bh(&tp->lock);
14158                 return;
14159         }
14160
14161         tg3_get_nstats(tp, stats);
14162         spin_unlock_bh(&tp->lock);
14163 }
14164
14165 static void tg3_set_rx_mode(struct net_device *dev)
14166 {
14167         struct tg3 *tp = netdev_priv(dev);
14168
14169         if (!netif_running(dev))
14170                 return;
14171
14172         tg3_full_lock(tp, 0);
14173         __tg3_set_rx_mode(dev);
14174         tg3_full_unlock(tp);
14175 }
14176
14177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14178                                int new_mtu)
14179 {
14180         dev->mtu = new_mtu;
14181
14182         if (new_mtu > ETH_DATA_LEN) {
14183                 if (tg3_flag(tp, 5780_CLASS)) {
14184                         netdev_update_features(dev);
14185                         tg3_flag_clear(tp, TSO_CAPABLE);
14186                 } else {
14187                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14188                 }
14189         } else {
14190                 if (tg3_flag(tp, 5780_CLASS)) {
14191                         tg3_flag_set(tp, TSO_CAPABLE);
14192                         netdev_update_features(dev);
14193                 }
14194                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14195         }
14196 }
14197
14198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14199 {
14200         struct tg3 *tp = netdev_priv(dev);
14201         int err;
14202         bool reset_phy = false;
14203
14204         if (!netif_running(dev)) {
14205                 /* We'll just apply the new MTU later, when the
14206                  * device is brought up.
14207                  */
14208                 tg3_set_mtu(dev, tp, new_mtu);
14209                 return 0;
14210         }
14211
14212         tg3_phy_stop(tp);
14213
14214         tg3_netif_stop(tp);
14215
14216         tg3_set_mtu(dev, tp, new_mtu);
14217
14218         tg3_full_lock(tp, 1);
14219
14220         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14221
14222         /* Reset the PHY, otherwise the read DMA engine will be left in
14223          * a mode that breaks all requests up into 256-byte chunks.
14224          */
14225         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14226             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14227             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14228             tg3_asic_rev(tp) == ASIC_REV_5720)
14229                 reset_phy = true;
14230
14231         err = tg3_restart_hw(tp, reset_phy);
14232
14233         if (!err)
14234                 tg3_netif_start(tp);
14235
14236         tg3_full_unlock(tp);
14237
14238         if (!err)
14239                 tg3_phy_start(tp);
14240
14241         return err;
14242 }
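
/* Example trigger path (illustrative): a userspace MTU change such as
 *
 *   ip link set dev eth0 mtu 9000
 *
 * lands here via .ndo_change_mtu.  With the interface up, this costs a
 * full halt/restart of the hardware, plus a PHY reset on the ASIC
 * revisions listed above.
 */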
14243
14244 static const struct net_device_ops tg3_netdev_ops = {
14245         .ndo_open               = tg3_open,
14246         .ndo_stop               = tg3_close,
14247         .ndo_start_xmit         = tg3_start_xmit,
14248         .ndo_get_stats64        = tg3_get_stats64,
14249         .ndo_validate_addr      = eth_validate_addr,
14250         .ndo_set_rx_mode        = tg3_set_rx_mode,
14251         .ndo_set_mac_address    = tg3_set_mac_addr,
14252         .ndo_eth_ioctl          = tg3_ioctl,
14253         .ndo_tx_timeout         = tg3_tx_timeout,
14254         .ndo_change_mtu         = tg3_change_mtu,
14255         .ndo_fix_features       = tg3_fix_features,
14256         .ndo_set_features       = tg3_set_features,
14257 #ifdef CONFIG_NET_POLL_CONTROLLER
14258         .ndo_poll_controller    = tg3_poll_controller,
14259 #endif
14260 };
14261
14262 static void tg3_get_eeprom_size(struct tg3 *tp)
14263 {
14264         u32 cursize, val, magic;
14265
14266         tp->nvram_size = EEPROM_CHIP_SIZE;
14267
14268         if (tg3_nvram_read(tp, 0, &magic) != 0)
14269                 return;
14270
14271         if ((magic != TG3_EEPROM_MAGIC) &&
14272             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14273             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14274                 return;
14275
14276         /*
14277          * Size the chip by reading offsets at increasing powers of two.
14278          * When we encounter our validation signature, we know the addressing
14279          * has wrapped around, and thus have our chip size.
14280          */
14281         cursize = 0x10;
14282
14283         while (cursize < tp->nvram_size) {
14284                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14285                         return;
14286
14287                 if (val == magic)
14288                         break;
14289
14290                 cursize <<= 1;
14291         }
14292
14293         tp->nvram_size = cursize;
14294 }
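
/* Worked example of the sizing loop above (values illustrative): with the
 * magic signature at offset 0, reads are attempted at 0x10, 0x20, 0x40, ...
 * On a 32 KB part the addressing wraps, so the read at cursize == 0x8000 is
 * satisfied from offset 0 and returns the magic value again, giving
 * tp->nvram_size = 0x8000.
 */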
14295
14296 static void tg3_get_nvram_size(struct tg3 *tp)
14297 {
14298         u32 val;
14299
14300         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14301                 return;
14302
14303         /* Selfboot format */
14304         if (val != TG3_EEPROM_MAGIC) {
14305                 tg3_get_eeprom_size(tp);
14306                 return;
14307         }
14308
14309         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14310                 if (val != 0) {
14311                         /* This is confusing.  We want to operate on the
14312                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14313                          * call will read from NVRAM and byteswap the data
14314                          * according to the byteswapping settings for all
14315                          * other register accesses.  This ensures the data we
14316                          * want will always reside in the lower 16-bits.
14317                          * However, the data in NVRAM is in LE format, which
14318                          * means the data from the NVRAM read will always be
14319                          * opposite the endianness of the CPU.  The 16-bit
14320                          * byteswap then brings the data to CPU endianness.
14321                          */
14322                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14323                         return;
14324                 }
14325         }
14326         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14327 }
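
/* Illustrative numbers for the swab16() dance above: if the flash stores
 * the halfword 512 (0x0200, little-endian) at offset 0xf2, the
 * register-byteswapped read leaves it CPU-swapped in the low 16 bits;
 * swab16() restores 512 and the size becomes 512 * 1024 bytes.
 */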
14328
14329 static void tg3_get_nvram_info(struct tg3 *tp)
14330 {
14331         u32 nvcfg1;
14332
14333         nvcfg1 = tr32(NVRAM_CFG1);
14334         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14335                 tg3_flag_set(tp, FLASH);
14336         } else {
14337                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14338                 tw32(NVRAM_CFG1, nvcfg1);
14339         }
14340
14341         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14342             tg3_flag(tp, 5780_CLASS)) {
14343                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14344                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14345                         tp->nvram_jedecnum = JEDEC_ATMEL;
14346                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14347                         tg3_flag_set(tp, NVRAM_BUFFERED);
14348                         break;
14349                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14350                         tp->nvram_jedecnum = JEDEC_ATMEL;
14351                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14352                         break;
14353                 case FLASH_VENDOR_ATMEL_EEPROM:
14354                         tp->nvram_jedecnum = JEDEC_ATMEL;
14355                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14356                         tg3_flag_set(tp, NVRAM_BUFFERED);
14357                         break;
14358                 case FLASH_VENDOR_ST:
14359                         tp->nvram_jedecnum = JEDEC_ST;
14360                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14361                         tg3_flag_set(tp, NVRAM_BUFFERED);
14362                         break;
14363                 case FLASH_VENDOR_SAIFUN:
14364                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14365                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14366                         break;
14367                 case FLASH_VENDOR_SST_SMALL:
14368                 case FLASH_VENDOR_SST_LARGE:
14369                         tp->nvram_jedecnum = JEDEC_SST;
14370                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14371                         break;
14372                 }
14373         } else {
14374                 tp->nvram_jedecnum = JEDEC_ATMEL;
14375                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14376                 tg3_flag_set(tp, NVRAM_BUFFERED);
14377         }
14378 }
14379
14380 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14381 {
14382         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14383         case FLASH_5752PAGE_SIZE_256:
14384                 tp->nvram_pagesize = 256;
14385                 break;
14386         case FLASH_5752PAGE_SIZE_512:
14387                 tp->nvram_pagesize = 512;
14388                 break;
14389         case FLASH_5752PAGE_SIZE_1K:
14390                 tp->nvram_pagesize = 1024;
14391                 break;
14392         case FLASH_5752PAGE_SIZE_2K:
14393                 tp->nvram_pagesize = 2048;
14394                 break;
14395         case FLASH_5752PAGE_SIZE_4K:
14396                 tp->nvram_pagesize = 4096;
14397                 break;
14398         case FLASH_5752PAGE_SIZE_264:
14399                 tp->nvram_pagesize = 264;
14400                 break;
14401         case FLASH_5752PAGE_SIZE_528:
14402                 tp->nvram_pagesize = 528;
14403                 break;
14404         }
14405 }
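
/* Note: 264 and 528 are the "power of two plus extra" page sizes of Atmel
 * AT45DB DataFlash parts (256+8 and 512+16 bytes).  Callers use exactly
 * these two values to decide whether page-oriented address translation is
 * needed; every other page size gets NO_NVRAM_ADDR_TRANS set.
 */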
14406
14407 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14408 {
14409         u32 nvcfg1;
14410
14411         nvcfg1 = tr32(NVRAM_CFG1);
14412
14413         /* NVRAM protection for TPM */
14414         if (nvcfg1 & (1 << 27))
14415                 tg3_flag_set(tp, PROTECTED_NVRAM);
14416
14417         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14418         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14419         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14420                 tp->nvram_jedecnum = JEDEC_ATMEL;
14421                 tg3_flag_set(tp, NVRAM_BUFFERED);
14422                 break;
14423         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14424                 tp->nvram_jedecnum = JEDEC_ATMEL;
14425                 tg3_flag_set(tp, NVRAM_BUFFERED);
14426                 tg3_flag_set(tp, FLASH);
14427                 break;
14428         case FLASH_5752VENDOR_ST_M45PE10:
14429         case FLASH_5752VENDOR_ST_M45PE20:
14430         case FLASH_5752VENDOR_ST_M45PE40:
14431                 tp->nvram_jedecnum = JEDEC_ST;
14432                 tg3_flag_set(tp, NVRAM_BUFFERED);
14433                 tg3_flag_set(tp, FLASH);
14434                 break;
14435         }
14436
14437         if (tg3_flag(tp, FLASH)) {
14438                 tg3_nvram_get_pagesize(tp, nvcfg1);
14439         } else {
14440                 /* For eeprom, set pagesize to maximum eeprom size */
14441                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14442
14443                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14444                 tw32(NVRAM_CFG1, nvcfg1);
14445         }
14446 }
14447
14448 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14449 {
14450         u32 nvcfg1, protect = 0;
14451
14452         nvcfg1 = tr32(NVRAM_CFG1);
14453
14454         /* NVRAM protection for TPM */
14455         if (nvcfg1 & (1 << 27)) {
14456                 tg3_flag_set(tp, PROTECTED_NVRAM);
14457                 protect = 1;
14458         }
14459
14460         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14461         switch (nvcfg1) {
14462         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14463         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14464         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14465         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14466                 tp->nvram_jedecnum = JEDEC_ATMEL;
14467                 tg3_flag_set(tp, NVRAM_BUFFERED);
14468                 tg3_flag_set(tp, FLASH);
14469                 tp->nvram_pagesize = 264;
14470                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14471                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14472                         tp->nvram_size = (protect ? 0x3e200 :
14473                                           TG3_NVRAM_SIZE_512KB);
14474                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14475                         tp->nvram_size = (protect ? 0x1f200 :
14476                                           TG3_NVRAM_SIZE_256KB);
14477                 else
14478                         tp->nvram_size = (protect ? 0x1f200 :
14479                                           TG3_NVRAM_SIZE_128KB);
14480                 break;
14481         case FLASH_5752VENDOR_ST_M45PE10:
14482         case FLASH_5752VENDOR_ST_M45PE20:
14483         case FLASH_5752VENDOR_ST_M45PE40:
14484                 tp->nvram_jedecnum = JEDEC_ST;
14485                 tg3_flag_set(tp, NVRAM_BUFFERED);
14486                 tg3_flag_set(tp, FLASH);
14487                 tp->nvram_pagesize = 256;
14488                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14489                         tp->nvram_size = (protect ?
14490                                           TG3_NVRAM_SIZE_64KB :
14491                                           TG3_NVRAM_SIZE_128KB);
14492                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14493                         tp->nvram_size = (protect ?
14494                                           TG3_NVRAM_SIZE_64KB :
14495                                           TG3_NVRAM_SIZE_256KB);
14496                 else
14497                         tp->nvram_size = (protect ?
14498                                           TG3_NVRAM_SIZE_128KB :
14499                                           TG3_NVRAM_SIZE_512KB);
14500                 break;
14501         }
14502 }
14503
14504 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14505 {
14506         u32 nvcfg1;
14507
14508         nvcfg1 = tr32(NVRAM_CFG1);
14509
14510         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14511         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14512         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14513         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14514         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14515                 tp->nvram_jedecnum = JEDEC_ATMEL;
14516                 tg3_flag_set(tp, NVRAM_BUFFERED);
14517                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14518
14519                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14520                 tw32(NVRAM_CFG1, nvcfg1);
14521                 break;
14522         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14523         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14524         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14525         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14526                 tp->nvram_jedecnum = JEDEC_ATMEL;
14527                 tg3_flag_set(tp, NVRAM_BUFFERED);
14528                 tg3_flag_set(tp, FLASH);
14529                 tp->nvram_pagesize = 264;
14530                 break;
14531         case FLASH_5752VENDOR_ST_M45PE10:
14532         case FLASH_5752VENDOR_ST_M45PE20:
14533         case FLASH_5752VENDOR_ST_M45PE40:
14534                 tp->nvram_jedecnum = JEDEC_ST;
14535                 tg3_flag_set(tp, NVRAM_BUFFERED);
14536                 tg3_flag_set(tp, FLASH);
14537                 tp->nvram_pagesize = 256;
14538                 break;
14539         }
14540 }
14541
14542 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14543 {
14544         u32 nvcfg1, protect = 0;
14545
14546         nvcfg1 = tr32(NVRAM_CFG1);
14547
14548         /* NVRAM protection for TPM */
14549         if (nvcfg1 & (1 << 27)) {
14550                 tg3_flag_set(tp, PROTECTED_NVRAM);
14551                 protect = 1;
14552         }
14553
14554         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14555         switch (nvcfg1) {
14556         case FLASH_5761VENDOR_ATMEL_ADB021D:
14557         case FLASH_5761VENDOR_ATMEL_ADB041D:
14558         case FLASH_5761VENDOR_ATMEL_ADB081D:
14559         case FLASH_5761VENDOR_ATMEL_ADB161D:
14560         case FLASH_5761VENDOR_ATMEL_MDB021D:
14561         case FLASH_5761VENDOR_ATMEL_MDB041D:
14562         case FLASH_5761VENDOR_ATMEL_MDB081D:
14563         case FLASH_5761VENDOR_ATMEL_MDB161D:
14564                 tp->nvram_jedecnum = JEDEC_ATMEL;
14565                 tg3_flag_set(tp, NVRAM_BUFFERED);
14566                 tg3_flag_set(tp, FLASH);
14567                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14568                 tp->nvram_pagesize = 256;
14569                 break;
14570         case FLASH_5761VENDOR_ST_A_M45PE20:
14571         case FLASH_5761VENDOR_ST_A_M45PE40:
14572         case FLASH_5761VENDOR_ST_A_M45PE80:
14573         case FLASH_5761VENDOR_ST_A_M45PE16:
14574         case FLASH_5761VENDOR_ST_M_M45PE20:
14575         case FLASH_5761VENDOR_ST_M_M45PE40:
14576         case FLASH_5761VENDOR_ST_M_M45PE80:
14577         case FLASH_5761VENDOR_ST_M_M45PE16:
14578                 tp->nvram_jedecnum = JEDEC_ST;
14579                 tg3_flag_set(tp, NVRAM_BUFFERED);
14580                 tg3_flag_set(tp, FLASH);
14581                 tp->nvram_pagesize = 256;
14582                 break;
14583         }
14584
14585         if (protect) {
14586                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14587         } else {
14588                 switch (nvcfg1) {
14589                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14590                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14591                 case FLASH_5761VENDOR_ST_A_M45PE16:
14592                 case FLASH_5761VENDOR_ST_M_M45PE16:
14593                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14594                         break;
14595                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14596                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14597                 case FLASH_5761VENDOR_ST_A_M45PE80:
14598                 case FLASH_5761VENDOR_ST_M_M45PE80:
14599                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14600                         break;
14601                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14602                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14603                 case FLASH_5761VENDOR_ST_A_M45PE40:
14604                 case FLASH_5761VENDOR_ST_M_M45PE40:
14605                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14606                         break;
14607                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14608                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14609                 case FLASH_5761VENDOR_ST_A_M45PE20:
14610                 case FLASH_5761VENDOR_ST_M_M45PE20:
14611                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14612                         break;
14613                 }
14614         }
14615 }
14616
14617 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14618 {
14619         tp->nvram_jedecnum = JEDEC_ATMEL;
14620         tg3_flag_set(tp, NVRAM_BUFFERED);
14621         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14622 }
14623
14624 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14625 {
14626         u32 nvcfg1;
14627
14628         nvcfg1 = tr32(NVRAM_CFG1);
14629
14630         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14631         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14632         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14633                 tp->nvram_jedecnum = JEDEC_ATMEL;
14634                 tg3_flag_set(tp, NVRAM_BUFFERED);
14635                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14636
14637                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14638                 tw32(NVRAM_CFG1, nvcfg1);
14639                 return;
14640         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14641         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14642         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14643         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14644         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14645         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14646         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14647                 tp->nvram_jedecnum = JEDEC_ATMEL;
14648                 tg3_flag_set(tp, NVRAM_BUFFERED);
14649                 tg3_flag_set(tp, FLASH);
14650
14651                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14652                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14653                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14654                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14655                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14656                         break;
14657                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14658                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14659                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14660                         break;
14661                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14662                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14663                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14664                         break;
14665                 }
14666                 break;
14667         case FLASH_5752VENDOR_ST_M45PE10:
14668         case FLASH_5752VENDOR_ST_M45PE20:
14669         case FLASH_5752VENDOR_ST_M45PE40:
14670                 tp->nvram_jedecnum = JEDEC_ST;
14671                 tg3_flag_set(tp, NVRAM_BUFFERED);
14672                 tg3_flag_set(tp, FLASH);
14673
14674                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14675                 case FLASH_5752VENDOR_ST_M45PE10:
14676                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14677                         break;
14678                 case FLASH_5752VENDOR_ST_M45PE20:
14679                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14680                         break;
14681                 case FLASH_5752VENDOR_ST_M45PE40:
14682                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14683                         break;
14684                 }
14685                 break;
14686         default:
14687                 tg3_flag_set(tp, NO_NVRAM);
14688                 return;
14689         }
14690
14691         tg3_nvram_get_pagesize(tp, nvcfg1);
14692         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14693                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14694 }
14695
14697 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14698 {
14699         u32 nvcfg1;
14700
14701         nvcfg1 = tr32(NVRAM_CFG1);
14702
14703         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704         case FLASH_5717VENDOR_ATMEL_EEPROM:
14705         case FLASH_5717VENDOR_MICRO_EEPROM:
14706                 tp->nvram_jedecnum = JEDEC_ATMEL;
14707                 tg3_flag_set(tp, NVRAM_BUFFERED);
14708                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14709
14710                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14711                 tw32(NVRAM_CFG1, nvcfg1);
14712                 return;
14713         case FLASH_5717VENDOR_ATMEL_MDB011D:
14714         case FLASH_5717VENDOR_ATMEL_ADB011B:
14715         case FLASH_5717VENDOR_ATMEL_ADB011D:
14716         case FLASH_5717VENDOR_ATMEL_MDB021D:
14717         case FLASH_5717VENDOR_ATMEL_ADB021B:
14718         case FLASH_5717VENDOR_ATMEL_ADB021D:
14719         case FLASH_5717VENDOR_ATMEL_45USPT:
14720                 tp->nvram_jedecnum = JEDEC_ATMEL;
14721                 tg3_flag_set(tp, NVRAM_BUFFERED);
14722                 tg3_flag_set(tp, FLASH);
14723
14724                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14725                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14726                         /* Detect size with tg3_get_nvram_size() */
14727                         break;
14728                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14729                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14730                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14731                         break;
14732                 default:
14733                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14734                         break;
14735                 }
14736                 break;
14737         case FLASH_5717VENDOR_ST_M_M25PE10:
14738         case FLASH_5717VENDOR_ST_A_M25PE10:
14739         case FLASH_5717VENDOR_ST_M_M45PE10:
14740         case FLASH_5717VENDOR_ST_A_M45PE10:
14741         case FLASH_5717VENDOR_ST_M_M25PE20:
14742         case FLASH_5717VENDOR_ST_A_M25PE20:
14743         case FLASH_5717VENDOR_ST_M_M45PE20:
14744         case FLASH_5717VENDOR_ST_A_M45PE20:
14745         case FLASH_5717VENDOR_ST_25USPT:
14746         case FLASH_5717VENDOR_ST_45USPT:
14747                 tp->nvram_jedecnum = JEDEC_ST;
14748                 tg3_flag_set(tp, NVRAM_BUFFERED);
14749                 tg3_flag_set(tp, FLASH);
14750
14751                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752                 case FLASH_5717VENDOR_ST_M_M25PE20:
14753                 case FLASH_5717VENDOR_ST_M_M45PE20:
14754                         /* Detect size with tg3_get_nvram_size() */
14755                         break;
14756                 case FLASH_5717VENDOR_ST_A_M25PE20:
14757                 case FLASH_5717VENDOR_ST_A_M45PE20:
14758                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14759                         break;
14760                 default:
14761                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14762                         break;
14763                 }
14764                 break;
14765         default:
14766                 tg3_flag_set(tp, NO_NVRAM);
14767                 return;
14768         }
14769
14770         tg3_nvram_get_pagesize(tp, nvcfg1);
14771         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14772                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14773 }
14774
14775 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14776 {
14777         u32 nvcfg1, nvmpinstrp, nv_status;
14778
14779         nvcfg1 = tr32(NVRAM_CFG1);
14780         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14781
14782         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14783                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14784                         tg3_flag_set(tp, NO_NVRAM);
14785                         return;
14786                 }
14787
14788                 switch (nvmpinstrp) {
14789                 case FLASH_5762_MX25L_100:
14790                 case FLASH_5762_MX25L_200:
14791                 case FLASH_5762_MX25L_400:
14792                 case FLASH_5762_MX25L_800:
14793                 case FLASH_5762_MX25L_160_320:
14794                         tp->nvram_pagesize = 4096;
14795                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14796                         tg3_flag_set(tp, NVRAM_BUFFERED);
14797                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14798                         tg3_flag_set(tp, FLASH);
14799                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14800                         tp->nvram_size =
14801                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14802                                                 AUTOSENSE_DEVID_MASK)
14803                                         << AUTOSENSE_SIZE_IN_MB);
14804                         return;
14805
14806                 case FLASH_5762_EEPROM_HD:
14807                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14808                         break;
14809                 case FLASH_5762_EEPROM_LD:
14810                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14811                         break;
14812                 case FLASH_5720VENDOR_M_ST_M45PE20:
14813                         /* This pinstrap supports multiple sizes, so force it
14814                          * to read the actual size from location 0xf0.
14815                          */
14816                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14817                         break;
14818                 }
14819         }
14820
14821         switch (nvmpinstrp) {
14822         case FLASH_5720_EEPROM_HD:
14823         case FLASH_5720_EEPROM_LD:
14824                 tp->nvram_jedecnum = JEDEC_ATMEL;
14825                 tg3_flag_set(tp, NVRAM_BUFFERED);
14826
14827                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14828                 tw32(NVRAM_CFG1, nvcfg1);
14829                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14830                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14831                 else
14832                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14833                 return;
14834         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14835         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14836         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14837         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14838         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14839         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14840         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14841         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14842         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14843         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14844         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14845         case FLASH_5720VENDOR_ATMEL_45USPT:
14846                 tp->nvram_jedecnum = JEDEC_ATMEL;
14847                 tg3_flag_set(tp, NVRAM_BUFFERED);
14848                 tg3_flag_set(tp, FLASH);
14849
14850                 switch (nvmpinstrp) {
14851                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14852                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14853                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14854                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14855                         break;
14856                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14857                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14858                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14859                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14860                         break;
14861                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14862                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14863                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14864                         break;
14865                 default:
14866                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14867                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14868                         break;
14869                 }
14870                 break;
14871         case FLASH_5720VENDOR_M_ST_M25PE10:
14872         case FLASH_5720VENDOR_M_ST_M45PE10:
14873         case FLASH_5720VENDOR_A_ST_M25PE10:
14874         case FLASH_5720VENDOR_A_ST_M45PE10:
14875         case FLASH_5720VENDOR_M_ST_M25PE20:
14876         case FLASH_5720VENDOR_M_ST_M45PE20:
14877         case FLASH_5720VENDOR_A_ST_M25PE20:
14878         case FLASH_5720VENDOR_A_ST_M45PE20:
14879         case FLASH_5720VENDOR_M_ST_M25PE40:
14880         case FLASH_5720VENDOR_M_ST_M45PE40:
14881         case FLASH_5720VENDOR_A_ST_M25PE40:
14882         case FLASH_5720VENDOR_A_ST_M45PE40:
14883         case FLASH_5720VENDOR_M_ST_M25PE80:
14884         case FLASH_5720VENDOR_M_ST_M45PE80:
14885         case FLASH_5720VENDOR_A_ST_M25PE80:
14886         case FLASH_5720VENDOR_A_ST_M45PE80:
14887         case FLASH_5720VENDOR_ST_25USPT:
14888         case FLASH_5720VENDOR_ST_45USPT:
14889                 tp->nvram_jedecnum = JEDEC_ST;
14890                 tg3_flag_set(tp, NVRAM_BUFFERED);
14891                 tg3_flag_set(tp, FLASH);
14892
14893                 switch (nvmpinstrp) {
14894                 case FLASH_5720VENDOR_M_ST_M25PE20:
14895                 case FLASH_5720VENDOR_M_ST_M45PE20:
14896                 case FLASH_5720VENDOR_A_ST_M25PE20:
14897                 case FLASH_5720VENDOR_A_ST_M45PE20:
14898                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14899                         break;
14900                 case FLASH_5720VENDOR_M_ST_M25PE40:
14901                 case FLASH_5720VENDOR_M_ST_M45PE40:
14902                 case FLASH_5720VENDOR_A_ST_M25PE40:
14903                 case FLASH_5720VENDOR_A_ST_M45PE40:
14904                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14905                         break;
14906                 case FLASH_5720VENDOR_M_ST_M25PE80:
14907                 case FLASH_5720VENDOR_M_ST_M45PE80:
14908                 case FLASH_5720VENDOR_A_ST_M25PE80:
14909                 case FLASH_5720VENDOR_A_ST_M45PE80:
14910                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14911                         break;
14912                 default:
14913                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14914                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14915                         break;
14916                 }
14917                 break;
14918         default:
14919                 tg3_flag_set(tp, NO_NVRAM);
14920                 return;
14921         }
14922
14923         tg3_nvram_get_pagesize(tp, nvcfg1);
14924         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14925                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14926
14927         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14928                 u32 val;
14929
14930                 if (tg3_nvram_read(tp, 0, &val))
14931                         return;
14932
14933                 if (val != TG3_EEPROM_MAGIC &&
14934                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14935                         tg3_flag_set(tp, NO_NVRAM);
14936         }
14937 }
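
/* The 5762 autosense math above decodes to a power-of-two size in MB:
 * assuming AUTOSENSE_SIZE_IN_MB is the megabyte shift (20), a device-ID
 * field of n yields (1 << n) MB, e.g. n == 2 gives a 4 MB part.
 */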
14938
14939 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14940 static void tg3_nvram_init(struct tg3 *tp)
14941 {
14942         if (tg3_flag(tp, IS_SSB_CORE)) {
14943                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14944                 tg3_flag_clear(tp, NVRAM);
14945                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14946                 tg3_flag_set(tp, NO_NVRAM);
14947                 return;
14948         }
14949
14950         tw32_f(GRC_EEPROM_ADDR,
14951              (EEPROM_ADDR_FSM_RESET |
14952               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14953                EEPROM_ADDR_CLKPERD_SHIFT)));
14954
14955         msleep(1);
14956
14957         /* Enable seeprom accesses. */
14958         tw32_f(GRC_LOCAL_CTRL,
14959              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14960         udelay(100);
14961
14962         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14963             tg3_asic_rev(tp) != ASIC_REV_5701) {
14964                 tg3_flag_set(tp, NVRAM);
14965
14966                 if (tg3_nvram_lock(tp)) {
14967                         netdev_warn(tp->dev,
14968                                     "Cannot get nvram lock, %s failed\n",
14969                                     __func__);
14970                         return;
14971                 }
14972                 tg3_enable_nvram_access(tp);
14973
14974                 tp->nvram_size = 0;
14975
14976                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14977                         tg3_get_5752_nvram_info(tp);
14978                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14979                         tg3_get_5755_nvram_info(tp);
14980                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14981                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14982                          tg3_asic_rev(tp) == ASIC_REV_5785)
14983                         tg3_get_5787_nvram_info(tp);
14984                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14985                         tg3_get_5761_nvram_info(tp);
14986                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14987                         tg3_get_5906_nvram_info(tp);
14988                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14989                          tg3_flag(tp, 57765_CLASS))
14990                         tg3_get_57780_nvram_info(tp);
14991                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14992                          tg3_asic_rev(tp) == ASIC_REV_5719)
14993                         tg3_get_5717_nvram_info(tp);
14994                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14995                          tg3_asic_rev(tp) == ASIC_REV_5762)
14996                         tg3_get_5720_nvram_info(tp);
14997                 else
14998                         tg3_get_nvram_info(tp);
14999
15000                 if (tp->nvram_size == 0)
15001                         tg3_get_nvram_size(tp);
15002
15003                 tg3_disable_nvram_access(tp);
15004                 tg3_nvram_unlock(tp);
15005
15006         } else {
15007                 tg3_flag_clear(tp, NVRAM);
15008                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15009
15010                 tg3_get_eeprom_size(tp);
15011         }
15012 }
15013
15014 struct subsys_tbl_ent {
15015         u16 subsys_vendor, subsys_devid;
15016         u32 phy_id;
15017 };
15018
15019 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15020         /* Broadcom boards. */
15021         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15022           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15023         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15024           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15025         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15026           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15027         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15028           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15029         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15031         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15033         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15035         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15037         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15039         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15041         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15042           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15043
15044         /* 3com boards. */
15045         { TG3PCI_SUBVENDOR_ID_3COM,
15046           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15047         { TG3PCI_SUBVENDOR_ID_3COM,
15048           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15049         { TG3PCI_SUBVENDOR_ID_3COM,
15050           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15051         { TG3PCI_SUBVENDOR_ID_3COM,
15052           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15053         { TG3PCI_SUBVENDOR_ID_3COM,
15054           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15055
15056         /* DELL boards. */
15057         { TG3PCI_SUBVENDOR_ID_DELL,
15058           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15059         { TG3PCI_SUBVENDOR_ID_DELL,
15060           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15061         { TG3PCI_SUBVENDOR_ID_DELL,
15062           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15063         { TG3PCI_SUBVENDOR_ID_DELL,
15064           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15065
15066         /* Compaq boards. */
15067         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15068           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15069         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15070           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15071         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15072           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15073         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15074           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15075         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15076           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15077
15078         /* IBM boards. */
15079         { TG3PCI_SUBVENDOR_ID_IBM,
15080           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15081 };
15082
15083 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15084 {
15085         int i;
15086
15087         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15088                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15089                      tp->pdev->subsystem_vendor) &&
15090                     (subsys_id_to_phy_id[i].subsys_devid ==
15091                      tp->pdev->subsystem_device))
15092                         return &subsys_id_to_phy_id[i];
15093         }
15094         return NULL;
15095 }
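
/* Example lookup (entries taken from the table above): a board with
 * subsystem vendor TG3PCI_SUBVENDOR_ID_3COM and subsystem device
 * TG3PCI_SUBDEVICE_ID_3COM_3C996T resolves to TG3_PHY_ID_BCM5401; an
 * unmatched vendor/device pair returns NULL.
 */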
15096
15097 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15098 {
15099         u32 val;
15100
15101         tp->phy_id = TG3_PHY_ID_INVALID;
15102         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15103
15104         /* Assume an onboard, WOL-capable device by default.  */
15105         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15106         tg3_flag_set(tp, WOL_CAP);
15107
15108         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15109                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15110                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15111                         tg3_flag_set(tp, IS_NIC);
15112                 }
15113                 val = tr32(VCPU_CFGSHDW);
15114                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15115                         tg3_flag_set(tp, ASPM_WORKAROUND);
15116                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15117                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15118                         tg3_flag_set(tp, WOL_ENABLE);
15119                         device_set_wakeup_enable(&tp->pdev->dev, true);
15120                 }
15121                 goto done;
15122         }
15123
15124         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15125         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15126                 u32 nic_cfg, led_cfg;
15127                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15128                 u32 nic_phy_id, ver, eeprom_phy_id;
15129                 int eeprom_phy_serdes = 0;
15130
15131                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15132                 tp->nic_sram_data_cfg = nic_cfg;
15133
15134                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15135                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15136                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15137                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15138                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15139                     (ver > 0) && (ver < 0x100))
15140                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15141
15142                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15143                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15144
15145                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15146                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15147                     tg3_asic_rev(tp) == ASIC_REV_5720)
15148                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15149
15150                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15151                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15152                         eeprom_phy_serdes = 1;
15153
15154                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15155                 if (nic_phy_id != 0) {
15156                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15157                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15158
15159                         eeprom_phy_id  = (id1 >> 16) << 10;
15160                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15161                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15162                 } else
15163                         eeprom_phy_id = 0;
15164
15165                 tp->phy_id = eeprom_phy_id;
15166                 if (eeprom_phy_serdes) {
15167                         if (!tg3_flag(tp, 5705_PLUS))
15168                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15169                         else
15170                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15171                 }
15172
15173                 if (tg3_flag(tp, 5750_PLUS))
15174                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15175                                     SHASTA_EXT_LED_MODE_MASK);
15176                 else
15177                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15178
15179                 switch (led_cfg) {
15180                 default:
15181                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15182                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15183                         break;
15184
15185                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15186                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15187                         break;
15188
15189                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15190                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15191
15192                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15193                          * read on some older 5700/5701 bootcode.
15194                          */
15195                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15196                             tg3_asic_rev(tp) == ASIC_REV_5701)
15197                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15198
15199                         break;
15200
15201                 case SHASTA_EXT_LED_SHARED:
15202                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15203                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15204                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15205                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15206                                                  LED_CTRL_MODE_PHY_2);
15207
15208                         if (tg3_flag(tp, 5717_PLUS) ||
15209                             tg3_asic_rev(tp) == ASIC_REV_5762)
15210                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15211                                                 LED_CTRL_BLINK_RATE_MASK;
15212
15213                         break;
15214
15215                 case SHASTA_EXT_LED_MAC:
15216                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15217                         break;
15218
15219                 case SHASTA_EXT_LED_COMBO:
15220                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15221                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15222                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15223                                                  LED_CTRL_MODE_PHY_2);
15224                         break;
15225
15226                 }
15227
15228                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15229                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15230                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15231                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15232
15233                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15234                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15235
15236                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15237                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15238                         if ((tp->pdev->subsystem_vendor ==
15239                              PCI_VENDOR_ID_ARIMA) &&
15240                             (tp->pdev->subsystem_device == 0x205a ||
15241                              tp->pdev->subsystem_device == 0x2063))
15242                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15243                 } else {
15244                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15245                         tg3_flag_set(tp, IS_NIC);
15246                 }
15247
15248                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15249                         tg3_flag_set(tp, ENABLE_ASF);
15250                         if (tg3_flag(tp, 5750_PLUS))
15251                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15252                 }
15253
15254                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15255                     tg3_flag(tp, 5750_PLUS))
15256                         tg3_flag_set(tp, ENABLE_APE);
15257
15258                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15259                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15260                         tg3_flag_clear(tp, WOL_CAP);
15261
15262                 if (tg3_flag(tp, WOL_CAP) &&
15263                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15264                         tg3_flag_set(tp, WOL_ENABLE);
15265                         device_set_wakeup_enable(&tp->pdev->dev, true);
15266                 }
15267
15268                 if (cfg2 & (1 << 17))
15269                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15270
15271                 /* SerDes signal pre-emphasis in register 0x590 is set by
15272                  * the bootcode if bit 18 is set. */
15273                 if (cfg2 & (1 << 18))
15274                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15275
15276                 if ((tg3_flag(tp, 57765_PLUS) ||
15277                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15278                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15279                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15280                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15281
15282                 if (tg3_flag(tp, PCI_EXPRESS)) {
15283                         u32 cfg3;
15284
15285                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15286                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15287                             !tg3_flag(tp, 57765_PLUS) &&
15288                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15289                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15290                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15291                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15292                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15293                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15294                 }
15295
15296                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15297                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15298                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15299                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15300                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15301                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15302
15303                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15304                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15305         }
15306 done:
15307         if (tg3_flag(tp, WOL_CAP))
15308                 device_set_wakeup_enable(&tp->pdev->dev,
15309                                          tg3_flag(tp, WOL_ENABLE));
15310         else
15311                 device_set_wakeup_capable(&tp->pdev->dev, false);
15312 }
15313
15314 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15315 {
15316         int i, err;
15317         u32 val2, off = offset * 8;
15318
15319         err = tg3_nvram_lock(tp);
15320         if (err)
15321                 return err;
15322
15323         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15324         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15325                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15326         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15327         udelay(10);
15328
15329         for (i = 0; i < 100; i++) {
15330                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15331                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15332                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15333                         break;
15334                 }
15335                 udelay(10);
15336         }
15337
15338         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15339
15340         tg3_nvram_unlock(tp);
15341         if (val2 & APE_OTP_STATUS_CMD_DONE)
15342                 return 0;
15343
15344         return -EBUSY;
15345 }
15346
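      /* Start an OTP controller command and poll OTP_STATUS for the
       * CMD_DONE bit; returns 0 on success, -EBUSY on timeout.
       */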
15347 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15348 {
15349         int i;
15350         u32 val;
15351
15352         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15353         tw32(OTP_CTRL, cmd);
15354
15355         /* Wait for up to 1 ms for command to execute. */
15356         for (i = 0; i < 100; i++) {
15357                 val = tr32(OTP_STATUS);
15358                 if (val & OTP_STATUS_CMD_DONE)
15359                         break;
15360                 udelay(10);
15361         }
15362
15363         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15364 }
15365
15366 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15367  * configuration is a 32-bit value that straddles the alignment boundary.
15368  * We do two 32-bit reads and then shift and merge the results.
15369  */
15370 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15371 {
15372         u32 bhalf_otp, thalf_otp;
15373
15374         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15375
15376         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15377                 return 0;
15378
15379         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15380
15381         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15382                 return 0;
15383
15384         thalf_otp = tr32(OTP_READ_DATA);
15385
15386         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15387
15388         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15389                 return 0;
15390
15391         bhalf_otp = tr32(OTP_READ_DATA);
15392
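              /* The 32-bit config value straddles the two words read
               * above: its upper half is the low 16 bits of the first
               * word, its lower half the high 16 bits of the second.
               */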
15393         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15394 }
15395
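      /* Seed link_config with everything the PHY may advertise: gigabit
       * modes unless the device is 10/100-only (1000BASE-T half duplex
       * may be disabled separately), 10/100 plus TP for copper PHYs, and
       * fibre for serdes.  Autoneg is enabled and the active speed and
       * duplex start out unknown.
       */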
15396 static void tg3_phy_init_link_config(struct tg3 *tp)
15397 {
15398         u32 adv = ADVERTISED_Autoneg;
15399
15400         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15401                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15402                         adv |= ADVERTISED_1000baseT_Half;
15403                 adv |= ADVERTISED_1000baseT_Full;
15404         }
15405
15406         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15407                 adv |= ADVERTISED_100baseT_Half |
15408                        ADVERTISED_100baseT_Full |
15409                        ADVERTISED_10baseT_Half |
15410                        ADVERTISED_10baseT_Full |
15411                        ADVERTISED_TP;
15412         else
15413                 adv |= ADVERTISED_FIBRE;
15414
15415         tp->link_config.advertising = adv;
15416         tp->link_config.speed = SPEED_UNKNOWN;
15417         tp->link_config.duplex = DUPLEX_UNKNOWN;
15418         tp->link_config.autoneg = AUTONEG_ENABLE;
15419         tp->link_config.active_speed = SPEED_UNKNOWN;
15420         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15421
15422         tp->old_link = -1;
15423 }
15424
15425 static int tg3_phy_probe(struct tg3 *tp)
15426 {
15427         u32 hw_phy_id_1, hw_phy_id_2;
15428         u32 hw_phy_id, hw_phy_id_masked;
15429         int err;
15430
15431         /* flow control autonegotiation is default behavior */
15432         tg3_flag_set(tp, PAUSE_AUTONEG);
15433         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15434
15435         if (tg3_flag(tp, ENABLE_APE)) {
15436                 switch (tp->pci_fn) {
15437                 case 0:
15438                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15439                         break;
15440                 case 1:
15441                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15442                         break;
15443                 case 2:
15444                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15445                         break;
15446                 case 3:
15447                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15448                         break;
15449                 }
15450         }
15451
15452         if (!tg3_flag(tp, ENABLE_ASF) &&
15453             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15454             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15455                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15456                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15457
15458         if (tg3_flag(tp, USE_PHYLIB))
15459                 return tg3_phy_init(tp);
15460
15461         /* Reading the PHY ID register can conflict with ASF
15462          * firmware access to the PHY hardware.
15463          */
15464         err = 0;
15465         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15466                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15467         } else {
15468                 /* Now read the physical PHY_ID from the chip and verify
15469                  * that it is sane.  If it doesn't look good, we fall back
15470                  * to the PHY_ID found in the eeprom area or, failing
15471                  * that, the hard-coded subsys device table.
15472                  */
15473                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15474                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15475
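                      /* Fold the two MII ID words into tg3's internal
                       * PHY ID layout so the masked result can be
                       * compared against the known-PHY table.
                       */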
15476                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15477                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15478                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15479
15480                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15481         }
15482
15483         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15484                 tp->phy_id = hw_phy_id;
15485                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15486                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15487                 else
15488                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15489         } else {
15490                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15491                         /* Do nothing, phy ID already set up in
15492                          * tg3_get_eeprom_hw_cfg().
15493                          */
15494                 } else {
15495                         struct subsys_tbl_ent *p;
15496
15497                         /* No eeprom signature?  Try the hardcoded
15498                          * subsys device table.
15499                          */
15500                         p = tg3_lookup_by_subsys(tp);
15501                         if (p) {
15502                                 tp->phy_id = p->phy_id;
15503                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15504                                 /* So far we have seen the IDs 0xbc050cd0,
15505                                  * 0xbc050f80 and 0xbc050c30 on devices
15506                                  * connected to a BCM4785, and there are
15507                                  * probably more.  For now, just assume
15508                                  * that the phy is supported when it is
15509                                  * connected to an SSB core.
15510                                  */
15511                                 return -ENODEV;
15512                         }
15513
15514                         if (!tp->phy_id ||
15515                             tp->phy_id == TG3_PHY_ID_BCM8002)
15516                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15517                 }
15518         }
15519
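              /* Copper devices of the 5717/5719/5720/5762/57765/57766
               * families (excluding the A0 revisions of the 5717 and
               * 57765) support Energy Efficient Ethernet; default-enable
               * it with a 2047 us link-idle timer.
               */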
15520         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15521             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15522              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15523              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15524              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15525              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15526               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15527              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15528               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15529                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15530
15531                 tp->eee.supported = SUPPORTED_100baseT_Full |
15532                                     SUPPORTED_1000baseT_Full;
15533                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15534                                      ADVERTISED_1000baseT_Full;
15535                 tp->eee.eee_enabled = 1;
15536                 tp->eee.tx_lpi_enabled = 1;
15537                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15538         }
15539
15540         tg3_phy_init_link_config(tp);
15541
15542         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15543             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15544             !tg3_flag(tp, ENABLE_APE) &&
15545             !tg3_flag(tp, ENABLE_ASF)) {
15546                 u32 bmsr, dummy;
15547
15548                 tg3_readphy(tp, MII_BMSR, &bmsr);
15549                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15550                     (bmsr & BMSR_LSTATUS))
15551                         goto skip_phy_reset;
15552
15553                 err = tg3_phy_reset(tp);
15554                 if (err)
15555                         return err;
15556
15557                 tg3_phy_set_wirespeed(tp);
15558
15559                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15560                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15561                                             tp->link_config.flowctrl);
15562
15563                         tg3_writephy(tp, MII_BMCR,
15564                                      BMCR_ANENABLE | BMCR_ANRESTART);
15565                 }
15566         }
15567
15568 skip_phy_reset:
15569         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15570                 err = tg3_init_5401phy_dsp(tp);
15571                 if (err)
15572                         return err;
15573
15574                 err = tg3_init_5401phy_dsp(tp);
15575         }
15576
15577         return err;
15578 }
15579
15580 static void tg3_read_vpd(struct tg3 *tp)
15581 {
15582         u8 *vpd_data;
15583         unsigned int len, vpdlen;
15584         int i;
15585
15586         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15587         if (!vpd_data)
15588                 goto out_no_vpd;
15589
15590         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15591                                          PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15592         if (i < 0)
15593                 goto partno;
15594
15595         if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15596                 goto partno;
15597
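              /* A manufacturer ID of "1028" (Dell's PCI vendor ID) marks
               * a Dell-branded board; only then is the VENDOR0 keyword
               * used to seed the firmware version string.
               */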
15598         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15599                                          PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15600         if (i < 0)
15601                 goto partno;
15602
15603         memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15604         snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15605
15606 partno:
15607         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15608                                          PCI_VPD_RO_KEYWORD_PARTNO, &len);
15609         if (i < 0)
15610                 goto out_not_found;
15611
15612         if (len > TG3_BPN_SIZE)
15613                 goto out_not_found;
15614
15615         memcpy(tp->board_part_number, &vpd_data[i], len);
15616
15617 out_not_found:
15618         kfree(vpd_data);
15619         if (tp->board_part_number[0])
15620                 return;
15621
15622 out_no_vpd:
15623         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15624                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15625                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15626                         strcpy(tp->board_part_number, "BCM5717");
15627                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15628                         strcpy(tp->board_part_number, "BCM5718");
15629                 else
15630                         goto nomatch;
15631         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15632                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15633                         strcpy(tp->board_part_number, "BCM57780");
15634                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15635                         strcpy(tp->board_part_number, "BCM57760");
15636                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15637                         strcpy(tp->board_part_number, "BCM57790");
15638                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15639                         strcpy(tp->board_part_number, "BCM57788");
15640                 else
15641                         goto nomatch;
15642         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15643                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15644                         strcpy(tp->board_part_number, "BCM57761");
15645                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15646                         strcpy(tp->board_part_number, "BCM57765");
15647                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15648                         strcpy(tp->board_part_number, "BCM57781");
15649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15650                         strcpy(tp->board_part_number, "BCM57785");
15651                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15652                         strcpy(tp->board_part_number, "BCM57791");
15653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15654                         strcpy(tp->board_part_number, "BCM57795");
15655                 else
15656                         goto nomatch;
15657         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15658                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15659                         strcpy(tp->board_part_number, "BCM57762");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15661                         strcpy(tp->board_part_number, "BCM57766");
15662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15663                         strcpy(tp->board_part_number, "BCM57782");
15664                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15665                         strcpy(tp->board_part_number, "BCM57786");
15666                 else
15667                         goto nomatch;
15668         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15669                 strcpy(tp->board_part_number, "BCM95906");
15670         } else {
15671 nomatch:
15672                 strcpy(tp->board_part_number, "none");
15673         }
15674 }
15675
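      /* A firmware image is considered valid when the top bits of its
       * first word read 0x0c000000 and its second word is zero.
       */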
15676 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15677 {
15678         u32 val;
15679
15680         if (tg3_nvram_read(tp, offset, &val) ||
15681             (val & 0xfc000000) != 0x0c000000 ||
15682             tg3_nvram_read(tp, offset + 4, &val) ||
15683             val != 0)
15684                 return 0;
15685
15686         return 1;
15687 }
15688
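      /* Append the bootcode version to tp->fw_ver.  Newer images carry a
       * 16-byte version string reached through a pointer in the image
       * header; older images pack major/minor numbers into a fixed NVRAM
       * word.
       */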
15689 static void tg3_read_bc_ver(struct tg3 *tp)
15690 {
15691         u32 val, offset, start, ver_offset;
15692         int i, dst_off;
15693         bool newver = false;
15694
15695         if (tg3_nvram_read(tp, 0xc, &offset) ||
15696             tg3_nvram_read(tp, 0x4, &start))
15697                 return;
15698
15699         offset = tg3_nvram_logical_addr(tp, offset);
15700
15701         if (tg3_nvram_read(tp, offset, &val))
15702                 return;
15703
15704         if ((val & 0xfc000000) == 0x0c000000) {
15705                 if (tg3_nvram_read(tp, offset + 4, &val))
15706                         return;
15707
15708                 if (val == 0)
15709                         newver = true;
15710         }
15711
15712         dst_off = strlen(tp->fw_ver);
15713
15714         if (newver) {
15715                 if (TG3_VER_SIZE - dst_off < 16 ||
15716                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15717                         return;
15718
15719                 offset = offset + ver_offset - start;
15720                 for (i = 0; i < 16; i += 4) {
15721                         __be32 v;
15722                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15723                                 return;
15724
15725                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15726                 }
15727         } else {
15728                 u32 major, minor;
15729
15730                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15731                         return;
15732
15733                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15734                         TG3_NVM_BCVER_MAJSFT;
15735                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15736                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15737                          "v%d.%02d", major, minor);
15738         }
15739 }
15740
15741 static void tg3_read_hwsb_ver(struct tg3 *tp)
15742 {
15743         u32 val, major, minor;
15744
15745         /* Use native endian representation */
15746         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15747                 return;
15748
15749         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15750                 TG3_NVM_HWSB_CFG1_MAJSFT;
15751         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15752                 TG3_NVM_HWSB_CFG1_MINSFT;
15753
15754         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15755 }
15756
15757 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15758 {
15759         u32 offset, major, minor, build;
15760
15761         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15762
15763         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15764                 return;
15765
15766         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15767         case TG3_EEPROM_SB_REVISION_0:
15768                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15769                 break;
15770         case TG3_EEPROM_SB_REVISION_2:
15771                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15772                 break;
15773         case TG3_EEPROM_SB_REVISION_3:
15774                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15775                 break;
15776         case TG3_EEPROM_SB_REVISION_4:
15777                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15778                 break;
15779         case TG3_EEPROM_SB_REVISION_5:
15780                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15781                 break;
15782         case TG3_EEPROM_SB_REVISION_6:
15783                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15784                 break;
15785         default:
15786                 return;
15787         }
15788
15789         if (tg3_nvram_read(tp, offset, &val))
15790                 return;
15791
15792         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15793                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15794         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15795                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15796         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15797
15798         if (minor > 99 || build > 26)
15799                 return;
15800
15801         offset = strlen(tp->fw_ver);
15802         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15803                  " v%d.%02d", major, minor);
15804
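              /* Build numbers 1..26 are appended as a single letter
               * 'a'..'z'; larger values were rejected above.
               */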
15805         if (build > 0) {
15806                 offset = strlen(tp->fw_ver);
15807                 if (offset < TG3_VER_SIZE - 1)
15808                         tp->fw_ver[offset] = 'a' + build - 1;
15809         }
15810 }
15811
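      /* Scan the NVRAM directory for the ASF INI entry and, when the
       * image looks valid, append up to 16 bytes of its version string
       * to tp->fw_ver.
       */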
15812 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15813 {
15814         u32 val, offset, start;
15815         int i, vlen;
15816
15817         for (offset = TG3_NVM_DIR_START;
15818              offset < TG3_NVM_DIR_END;
15819              offset += TG3_NVM_DIRENT_SIZE) {
15820                 if (tg3_nvram_read(tp, offset, &val))
15821                         return;
15822
15823                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15824                         break;
15825         }
15826
15827         if (offset == TG3_NVM_DIR_END)
15828                 return;
15829
15830         if (!tg3_flag(tp, 5705_PLUS))
15831                 start = 0x08000000;
15832         else if (tg3_nvram_read(tp, offset - 4, &start))
15833                 return;
15834
15835         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15836             !tg3_fw_img_is_valid(tp, offset) ||
15837             tg3_nvram_read(tp, offset + 8, &val))
15838                 return;
15839
15840         offset += val - start;
15841
15842         vlen = strlen(tp->fw_ver);
15843
15844         tp->fw_ver[vlen++] = ',';
15845         tp->fw_ver[vlen++] = ' ';
15846
15847         for (i = 0; i < 4; i++) {
15848                 __be32 v;
15849                 if (tg3_nvram_read_be32(tp, offset, &v))
15850                         return;
15851
15852                 offset += sizeof(v);
15853
15854                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15855                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15856                         break;
15857                 }
15858
15859                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15860                 vlen += sizeof(v);
15861         }
15862 }
15863
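      /* Set APE_HAS_NCSI when the APE firmware is present, ready, and
       * advertises the NCSI feature.
       */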
15864 static void tg3_probe_ncsi(struct tg3 *tp)
15865 {
15866         u32 apedata;
15867
15868         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15869         if (apedata != APE_SEG_SIG_MAGIC)
15870                 return;
15871
15872         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15873         if (!(apedata & APE_FW_STATUS_READY))
15874                 return;
15875
15876         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15877                 tg3_flag_set(tp, APE_HAS_NCSI);
15878 }
15879
15880 static void tg3_read_dash_ver(struct tg3 *tp)
15881 {
15882         int vlen;
15883         u32 apedata;
15884         char *fwtype;
15885
15886         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15887
15888         if (tg3_flag(tp, APE_HAS_NCSI))
15889                 fwtype = "NCSI";
15890         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15891                 fwtype = "SMASH";
15892         else
15893                 fwtype = "DASH";
15894
15895         vlen = strlen(tp->fw_ver);
15896
15897         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15898                  fwtype,
15899                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15900                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15901                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15902                  (apedata & APE_FW_VERSION_BLDMSK));
15903 }
15904
15905 static void tg3_read_otp_ver(struct tg3 *tp)
15906 {
15907         u32 val, val2;
15908
15909         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15910                 return;
15911
15912         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15913             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15914             TG3_OTP_MAGIC0_VALID(val)) {
15915                 u64 val64 = (u64) val << 32 | val2;
15916                 u32 ver = 0;
15917                 int i, vlen;
15918
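                      /* The version lives in the low bytes of the 64-bit
                       * value; keep the last non-zero byte seen before
                       * the terminating zero byte.
                       */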
15919                 for (i = 0; i < 7; i++) {
15920                         if ((val64 & 0xff) == 0)
15921                                 break;
15922                         ver = val64 & 0xff;
15923                         val64 >>= 8;
15924                 }
15925                 vlen = strlen(tp->fw_ver);
15926                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15927         }
15928 }
15929
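      /* Build the composite tp->fw_ver string from whatever is present:
       * VPD data (already filled in by tg3_read_vpd()), the bootcode or
       * selfboot version from NVRAM, and any DASH/NCSI or management
       * firmware version.
       */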
15930 static void tg3_read_fw_ver(struct tg3 *tp)
15931 {
15932         u32 val;
15933         bool vpd_vers = false;
15934
15935         if (tp->fw_ver[0] != 0)
15936                 vpd_vers = true;
15937
15938         if (tg3_flag(tp, NO_NVRAM)) {
15939                 strcat(tp->fw_ver, "sb");
15940                 tg3_read_otp_ver(tp);
15941                 return;
15942         }
15943
15944         if (tg3_nvram_read(tp, 0, &val))
15945                 return;
15946
15947         if (val == TG3_EEPROM_MAGIC)
15948                 tg3_read_bc_ver(tp);
15949         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15950                 tg3_read_sb_ver(tp, val);
15951         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15952                 tg3_read_hwsb_ver(tp);
15953
15954         if (tg3_flag(tp, ENABLE_ASF)) {
15955                 if (tg3_flag(tp, ENABLE_APE)) {
15956                         tg3_probe_ncsi(tp);
15957                         if (!vpd_vers)
15958                                 tg3_read_dash_ver(tp);
15959                 } else if (!vpd_vers) {
15960                         tg3_read_mgmtfw_ver(tp);
15961                 }
15962         }
15963
15964         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15965 }
15966
15967 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15968 {
15969         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15970                 return TG3_RX_RET_MAX_SIZE_5717;
15971         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15972                 return TG3_RX_RET_MAX_SIZE_5700;
15973         else
15974                 return TG3_RX_RET_MAX_SIZE_5705;
15975 }
15976
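      /* Host bridges known to reorder posted writes to the mailbox
       * registers; devices behind one of these get the
       * MBOX_WRITE_REORDER workaround in tg3_get_invariants().
       */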
15977 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15978         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15979         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15980         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15981         { },
15982 };
15983
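      /* Find the other function of a dual-port device in the same PCI
       * slot; returns tp->pdev itself when the device is configured in
       * single-port mode.
       */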
15984 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15985 {
15986         struct pci_dev *peer;
15987         unsigned int func, devnr = tp->pdev->devfn & ~7;
15988
15989         for (func = 0; func < 8; func++) {
15990                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15991                 if (peer && peer != tp->pdev)
15992                         break;
15993                 pci_dev_put(peer);
15994         }
15995         /* 5704 can be configured in single-port mode; set peer to
15996          * tp->pdev in that case.
15997          */
15998         if (!peer) {
15999                 peer = tp->pdev;
16000                 return peer;
16001         }
16002
16003         /*
16004          * We don't need to keep the refcount elevated; there's no way
16005          * to remove one half of this device without removing the other.
16006          */
16007         pci_dev_put(peer);
16008
16009         return peer;
16010 }
16011
16012 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16013 {
16014         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16015         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16016                 u32 reg;
16017
16018                 /* All devices that use the alternate
16019                  * ASIC REV location have a CPMU.
16020                  */
16021                 tg3_flag_set(tp, CPMU_PRESENT);
16022
16023                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16024                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16025                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16026                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16027                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16028                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16029                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16030                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16031                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16034                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16035                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16036                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16037                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16038                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16039                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16040                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16041                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16042                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16045                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16046                 else
16047                         reg = TG3PCI_PRODID_ASICREV;
16048
16049                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16050         }
16051
16052         /* Wrong chip ID in 5752 A0. This code can be removed later
16053          * as A0 is not in production.
16054          */
16055         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16056                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16057
16058         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16059                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16060
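              /* The family flags set below nest: 5717_PLUS and
               * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS,
               * which in turn implies 5750_PLUS and finally 5705_PLUS.
               */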
16061         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16062             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16063             tg3_asic_rev(tp) == ASIC_REV_5720)
16064                 tg3_flag_set(tp, 5717_PLUS);
16065
16066         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16067             tg3_asic_rev(tp) == ASIC_REV_57766)
16068                 tg3_flag_set(tp, 57765_CLASS);
16069
16070         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16071              tg3_asic_rev(tp) == ASIC_REV_5762)
16072                 tg3_flag_set(tp, 57765_PLUS);
16073
16074         /* Intentionally exclude ASIC_REV_5906 */
16075         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16076             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16077             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16078             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16079             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16080             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16081             tg3_flag(tp, 57765_PLUS))
16082                 tg3_flag_set(tp, 5755_PLUS);
16083
16084         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16085             tg3_asic_rev(tp) == ASIC_REV_5714)
16086                 tg3_flag_set(tp, 5780_CLASS);
16087
16088         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16089             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16090             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16091             tg3_flag(tp, 5755_PLUS) ||
16092             tg3_flag(tp, 5780_CLASS))
16093                 tg3_flag_set(tp, 5750_PLUS);
16094
16095         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16096             tg3_flag(tp, 5750_PLUS))
16097                 tg3_flag_set(tp, 5705_PLUS);
16098 }
16099
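      /* Devices limited to 10/100 Mb/s: certain 5703 board IDs, anything
       * with a FET PHY, and entries flagged 10_100_ONLY in the PCI ID
       * table (a 5705 only when the 5705_10_100 sub-flag is also set).
       */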
16100 static bool tg3_10_100_only_device(struct tg3 *tp,
16101                                    const struct pci_device_id *ent)
16102 {
16103         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16104
16105         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16106              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16107             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16108                 return true;
16109
16110         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16111                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16112                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16113                                 return true;
16114                 } else {
16115                         return true;
16116                 }
16117         }
16118
16119         return false;
16120 }
16121
16122 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16123 {
16124         u32 misc_ctrl_reg;
16125         u32 pci_state_reg, grc_misc_cfg;
16126         u32 val;
16127         u16 pci_cmd;
16128         int err;
16129
16130         /* Force memory write invalidate off.  If we leave it on,
16131          * then on 5700_BX chips we have to enable a workaround.
16132          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16133          * to match the cacheline size.  The Broadcom driver has this
16134          * workaround but turns MWI off all the time, so it never uses
16135          * it.  This seems to suggest that the workaround is insufficient.
16136          */
16137         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16138         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16139         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16140
16141         /* Important! -- Make sure register accesses are byteswapped
16142          * correctly.  Also, for those chips that require it, make
16143          * sure that indirect register accesses are enabled before
16144          * the first operation.
16145          */
16146         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16147                               &misc_ctrl_reg);
16148         tp->misc_host_ctrl |= (misc_ctrl_reg &
16149                                MISC_HOST_CTRL_CHIPREV);
16150         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16151                                tp->misc_host_ctrl);
16152
16153         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16154
16155         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16156          * we need to disable memory and use config. cycles
16157          * only to access all registers. The 5702/03 chips
16158          * can mistakenly decode the special cycles from the
16159          * ICH chipsets as memory write cycles, causing corruption
16160          * of register and memory space. Only certain ICH bridges
16161          * will drive special cycles with non-zero data during the
16162          * address phase which can fall within the 5703's address
16163          * range. This is not an ICH bug as the PCI spec allows
16164          * non-zero address during special cycles. However, only
16165          * these ICH bridges are known to drive non-zero addresses
16166          * during special cycles.
16167          *
16168          * Since special cycles do not cross PCI bridges, we only
16169          * enable this workaround if the 5703 is on the secondary
16170          * bus of these ICH bridges.
16171          */
16172         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16173             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16174                 static struct tg3_dev_id {
16175                         u32     vendor;
16176                         u32     device;
16177                         u32     rev;
16178                 } ich_chipsets[] = {
16179                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16180                           PCI_ANY_ID },
16181                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16182                           PCI_ANY_ID },
16183                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16184                           0xa },
16185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16186                           PCI_ANY_ID },
16187                         { },
16188                 };
16189                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16190                 struct pci_dev *bridge = NULL;
16191
16192                 while (pci_id->vendor != 0) {
16193                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16194                                                 bridge);
16195                         if (!bridge) {
16196                                 pci_id++;
16197                                 continue;
16198                         }
16199                         if (pci_id->rev != PCI_ANY_ID) {
16200                                 if (bridge->revision > pci_id->rev)
16201                                         continue;
16202                         }
16203                         if (bridge->subordinate &&
16204                             (bridge->subordinate->number ==
16205                              tp->pdev->bus->number)) {
16206                                 tg3_flag_set(tp, ICH_WORKAROUND);
16207                                 pci_dev_put(bridge);
16208                                 break;
16209                         }
16210                 }
16211         }
16212
16213         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16214                 static struct tg3_dev_id {
16215                         u32     vendor;
16216                         u32     device;
16217                 } bridge_chipsets[] = {
16218                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16219                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16220                         { },
16221                 };
16222                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16223                 struct pci_dev *bridge = NULL;
16224
16225                 while (pci_id->vendor != 0) {
16226                         bridge = pci_get_device(pci_id->vendor,
16227                                                 pci_id->device,
16228                                                 bridge);
16229                         if (!bridge) {
16230                                 pci_id++;
16231                                 continue;
16232                         }
16233                         if (bridge->subordinate &&
16234                             (bridge->subordinate->number <=
16235                              tp->pdev->bus->number) &&
16236                             (bridge->subordinate->busn_res.end >=
16237                              tp->pdev->bus->number)) {
16238                                 tg3_flag_set(tp, 5701_DMA_BUG);
16239                                 pci_dev_put(bridge);
16240                                 break;
16241                         }
16242                 }
16243         }
16244
16245         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16246          * DMA addresses wider than 40 bits.  This bridge may have other
16247          * 57xx devices behind it in some 4-port NIC designs, for example.
16248          * Any tg3 device found behind the bridge will also need the 40-bit
16249          * DMA workaround.
16250          */
16251         if (tg3_flag(tp, 5780_CLASS)) {
16252                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16253                 tp->msi_cap = tp->pdev->msi_cap;
16254         } else {
16255                 struct pci_dev *bridge = NULL;
16256
16257                 do {
16258                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16259                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16260                                                 bridge);
16261                         if (bridge && bridge->subordinate &&
16262                             (bridge->subordinate->number <=
16263                              tp->pdev->bus->number) &&
16264                             (bridge->subordinate->busn_res.end >=
16265                              tp->pdev->bus->number)) {
16266                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16267                                 pci_dev_put(bridge);
16268                                 break;
16269                         }
16270                 } while (bridge);
16271         }
16272
16273         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16274             tg3_asic_rev(tp) == ASIC_REV_5714)
16275                 tp->pdev_peer = tg3_find_peer(tp);
16276
16277         /* Determine TSO capabilities */
16278         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16279                 ; /* Do nothing. HW bug. */
16280         else if (tg3_flag(tp, 57765_PLUS))
16281                 tg3_flag_set(tp, HW_TSO_3);
16282         else if (tg3_flag(tp, 5755_PLUS) ||
16283                  tg3_asic_rev(tp) == ASIC_REV_5906)
16284                 tg3_flag_set(tp, HW_TSO_2);
16285         else if (tg3_flag(tp, 5750_PLUS)) {
16286                 tg3_flag_set(tp, HW_TSO_1);
16287                 tg3_flag_set(tp, TSO_BUG);
16288                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16289                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16290                         tg3_flag_clear(tp, TSO_BUG);
16291         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16292                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16293                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16294                 tg3_flag_set(tp, FW_TSO);
16295                 tg3_flag_set(tp, TSO_BUG);
16296                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16297                         tp->fw_needed = FIRMWARE_TG3TSO5;
16298                 else
16299                         tp->fw_needed = FIRMWARE_TG3TSO;
16300         }
16301
16302         /* Selectively allow TSO based on operating conditions */
16303         if (tg3_flag(tp, HW_TSO_1) ||
16304             tg3_flag(tp, HW_TSO_2) ||
16305             tg3_flag(tp, HW_TSO_3) ||
16306             tg3_flag(tp, FW_TSO)) {
16307                 /* For firmware TSO, assume ASF is disabled.
16308                  * We'll disable TSO later if we discover ASF
16309                  * is enabled in tg3_get_eeprom_hw_cfg().
16310                  */
16311                 tg3_flag_set(tp, TSO_CAPABLE);
16312         } else {
16313                 tg3_flag_clear(tp, TSO_CAPABLE);
16314                 tg3_flag_clear(tp, TSO_BUG);
16315                 tp->fw_needed = NULL;
16316         }
16317
16318         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16319                 tp->fw_needed = FIRMWARE_TG3;
16320
16321         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16322                 tp->fw_needed = FIRMWARE_TG357766;
16323
16324         tp->irq_max = 1;
16325
16326         if (tg3_flag(tp, 5750_PLUS)) {
16327                 tg3_flag_set(tp, SUPPORT_MSI);
16328                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16329                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16330                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16331                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16332                      tp->pdev_peer == tp->pdev))
16333                         tg3_flag_clear(tp, SUPPORT_MSI);
16334
16335                 if (tg3_flag(tp, 5755_PLUS) ||
16336                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16337                         tg3_flag_set(tp, 1SHOT_MSI);
16338                 }
16339
16340                 if (tg3_flag(tp, 57765_PLUS)) {
16341                         tg3_flag_set(tp, SUPPORT_MSIX);
16342                         tp->irq_max = TG3_IRQ_MAX_VECS;
16343                 }
16344         }
16345
16346         tp->txq_max = 1;
16347         tp->rxq_max = 1;
16348         if (tp->irq_max > 1) {
16349                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16350                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16351
16352                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16353                     tg3_asic_rev(tp) == ASIC_REV_5720)
16354                         tp->txq_max = tp->irq_max - 1;
16355         }
16356
16357         if (tg3_flag(tp, 5755_PLUS) ||
16358             tg3_asic_rev(tp) == ASIC_REV_5906)
16359                 tg3_flag_set(tp, SHORT_DMA_BUG);
16360
16361         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16362                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16363
16364         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16365             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16366             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16367             tg3_asic_rev(tp) == ASIC_REV_5762)
16368                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16369
16370         if (tg3_flag(tp, 57765_PLUS) &&
16371             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16372                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16373
16374         if (!tg3_flag(tp, 5705_PLUS) ||
16375             tg3_flag(tp, 5780_CLASS) ||
16376             tg3_flag(tp, USE_JUMBO_BDFLAG))
16377                 tg3_flag_set(tp, JUMBO_CAPABLE);
16378
16379         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16380                               &pci_state_reg);
16381
16382         if (pci_is_pcie(tp->pdev)) {
16383                 u16 lnkctl;
16384
16385                 tg3_flag_set(tp, PCI_EXPRESS);
16386
16387                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16388                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16389                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16390                                 tg3_flag_clear(tp, HW_TSO_2);
16391                                 tg3_flag_clear(tp, TSO_CAPABLE);
16392                         }
16393                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16394                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16395                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16396                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16397                                 tg3_flag_set(tp, CLKREQ_BUG);
16398                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16399                         tg3_flag_set(tp, L1PLLPD_EN);
16400                 }
16401         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16402                 /* BCM5785 devices are effectively PCIe devices, and should
16403                  * follow PCIe codepaths, but do not have a PCIe capabilities
16404                  * section.
16405                  */
16406                 tg3_flag_set(tp, PCI_EXPRESS);
16407         } else if (!tg3_flag(tp, 5705_PLUS) ||
16408                    tg3_flag(tp, 5780_CLASS)) {
16409                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16410                 if (!tp->pcix_cap) {
16411                         dev_err(&tp->pdev->dev,
16412                                 "Cannot find PCI-X capability, aborting\n");
16413                         return -EIO;
16414                 }
16415
16416                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16417                         tg3_flag_set(tp, PCIX_MODE);
16418         }
16419
16420         /* If we have an AMD 762 or VIA K8T800 chipset, write
16421          * reordering to the mailbox registers done by the host
16422          * controller can cause major trouble.  We read back from
16423          * every mailbox register write to force the writes to be
16424          * posted to the chip in order.
16425          */
16426         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16427             !tg3_flag(tp, PCI_EXPRESS))
16428                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16429
16430         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16431                              &tp->pci_cacheline_sz);
16432         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16433                              &tp->pci_lat_timer);
16434         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16435             tp->pci_lat_timer < 64) {
16436                 tp->pci_lat_timer = 64;
16437                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16438                                       tp->pci_lat_timer);
16439         }
16440
16441         /* Important! -- It is critical that the PCI-X hw workaround
16442          * situation is decided before the first MMIO register access.
16443          */
16444         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16445                 /* 5700 BX chips need to have their TX producer index
16446                  * mailboxes written twice to workaround a bug.
16447                  * mailboxes written twice to work around a bug.
16448                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16449
16450                 /* If we are in PCI-X mode, enable register write workaround.
16451                  *
16452                  * The workaround is to use indirect register accesses
16453                  * for all chip writes except those to mailbox registers.
16454                  */
16455                 if (tg3_flag(tp, PCIX_MODE)) {
16456                         u32 pm_reg;
16457
16458                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16459
16460                          /* The chip can have its power management PCI config
16461                          * space registers clobbered due to this bug.
16462                          * So explicitly force the chip into D0 here.
16463                          */
16464                         pci_read_config_dword(tp->pdev,
16465                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16466                                               &pm_reg);
16467                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16468                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16469                         pci_write_config_dword(tp->pdev,
16470                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16471                                                pm_reg);
16472
16473                         /* Also, force SERR#/PERR# in PCI command. */
16474                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16475                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16476                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16477                 }
16478         }
16479
16480         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16481                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16482         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16483                 tg3_flag_set(tp, PCI_32BIT);
16484
16485         /* Chip-specific fixup from Broadcom driver */
16486         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16487             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16488                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16489                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16490         }
16491
16492         /* Default fast path register access methods */
16493         tp->read32 = tg3_read32;
16494         tp->write32 = tg3_write32;
16495         tp->read32_mbox = tg3_read32;
16496         tp->write32_mbox = tg3_write32;
16497         tp->write32_tx_mbox = tg3_write32;
16498         tp->write32_rx_mbox = tg3_write32;
16499
16500         /* Various workaround register access methods */
16501         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16502                 tp->write32 = tg3_write_indirect_reg32;
16503         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16504                  (tg3_flag(tp, PCI_EXPRESS) &&
16505                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16506                 /*
16507                  * Back to back register writes can cause problems on these
16508                  * chips; the workaround is to read back all reg writes
16509                  * except those to mailbox regs.
16510                  *
16511                  * See tg3_write_indirect_reg32().
16512                  */
16513                 tp->write32 = tg3_write_flush_reg32;
16514         }
16515
16516         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16517                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16518                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16519                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16520         }
16521
16522         if (tg3_flag(tp, ICH_WORKAROUND)) {
16523                 tp->read32 = tg3_read_indirect_reg32;
16524                 tp->write32 = tg3_write_indirect_reg32;
16525                 tp->read32_mbox = tg3_read_indirect_mbox;
16526                 tp->write32_mbox = tg3_write_indirect_mbox;
16527                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16528                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16529
16530                 iounmap(tp->regs);
16531                 tp->regs = NULL;
16532
16533                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16534                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16535                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16536         }
16537         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16538                 tp->read32_mbox = tg3_read32_mbox_5906;
16539                 tp->write32_mbox = tg3_write32_mbox_5906;
16540                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16541                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16542         }
16543
16544         if (tp->write32 == tg3_write_indirect_reg32 ||
16545             (tg3_flag(tp, PCIX_MODE) &&
16546              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16547               tg3_asic_rev(tp) == ASIC_REV_5701)))
16548                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16549
16550         /* The memory arbiter has to be enabled in order for SRAM accesses
16551          * to succeed.  Normally on powerup the tg3 chip firmware will make
16552          * sure it is enabled, but other entities such as system netboot
16553          * code might disable it.
16554          */
16555         val = tr32(MEMARB_MODE);
16556         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16557
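              /* Default the port's function number from devfn, then let
               * the hardware override it: PCI-X 5704/5780-class devices
               * report it in the PCI-X status register, while
               * 5717/5719/5720 report it in the CPMU status register.
               */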
16558         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16559         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16560             tg3_flag(tp, 5780_CLASS)) {
16561                 if (tg3_flag(tp, PCIX_MODE)) {
16562                         pci_read_config_dword(tp->pdev,
16563                                               tp->pcix_cap + PCI_X_STATUS,
16564                                               &val);
16565                         tp->pci_fn = val & 0x7;
16566                 }
16567         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16568                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16569                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16570                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16571                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16572                         val = tr32(TG3_CPMU_STATUS);
16573
16574                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16575                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16576                 else
16577                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16578                                      TG3_CPMU_STATUS_FSHFT_5719;
16579         }
16580
16581         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16582                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16583                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16584         }
16585
16586         /* Get eeprom hw config before calling tg3_set_power_state().
16587          * In particular, the TG3_FLAG_IS_NIC flag must be
16588          * determined before calling tg3_set_power_state() so that
16589          * we know whether or not to switch out of Vaux power.
16590          * When the flag is set, it means that GPIO1 is used for eeprom
16591          * write protect and also implies that it is a LOM where GPIOs
16592          * are not used to switch power.
16593          */
16594         tg3_get_eeprom_hw_cfg(tp);
16595
16596         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16597                 tg3_flag_clear(tp, TSO_CAPABLE);
16598                 tg3_flag_clear(tp, TSO_BUG);
16599                 tp->fw_needed = NULL;
16600         }
16601
16602         if (tg3_flag(tp, ENABLE_APE)) {
16603                 /* Allow reads and writes to the
16604                  * APE register and memory space.
16605                  */
16606                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16607                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16608                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16609                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16610                                        pci_state_reg);
16611
16612                 tg3_ape_lock_init(tp);
16613                 tp->ape_hb_interval =
16614                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16615         }
16616
16617         /* Set up tp->grc_local_ctrl before calling
16618          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16619          * will bring 5700's external PHY out of reset.
16620          * It is also used as eeprom write protect on LOMs.
16621          */
16622         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16623         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16624             tg3_flag(tp, EEPROM_WRITE_PROT))
16625                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16626                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16627         /* Unused GPIO3 must be driven as output on 5752 because there
16628          * are no pull-up resistors on unused GPIO pins.
16629          */
16630         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16631                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16632
16633         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16634             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16635             tg3_flag(tp, 57765_CLASS))
16636                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16637
16638         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16639             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16640                 /* Turn off the debug UART. */
16641                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16642                 if (tg3_flag(tp, IS_NIC))
16643                         /* Keep VMain power. */
16644                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16645                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16646         }
16647
16648         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16649                 tp->grc_local_ctrl |=
16650                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16651
16652         /* Switch out of Vaux if it is a NIC */
16653         tg3_pwrsrc_switch_to_vmain(tp);
16654
16655         /* Derive initial jumbo mode from MTU assigned in
16656          * ether_setup() via the alloc_etherdev() call
16657          */
16658         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16659                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16660
16661         /* Determine WakeOnLan speed to use. */
16662         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16663             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16664             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16665             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16666                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16667         } else {
16668                 tg3_flag_set(tp, WOL_SPEED_100MB);
16669         }
16670
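              /* The 5906 integrates a 10/100-only FET PHY instead of the
               * usual gigabit PHY.
               */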
16671         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16672                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16673
16674         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16675         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16676             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16677              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16678              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16679             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16680             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16681                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16682
16683         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16684             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16685                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16686         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16687                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16688
16689         if (tg3_flag(tp, 5705_PLUS) &&
16690             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16691             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16692             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16693             !tg3_flag(tp, 57765_PLUS)) {
16694                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16695                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16696                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16697                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16698                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16699                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16700                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16701                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16702                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16703                 } else
16704                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16705         }
16706
16707         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16708             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16709                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16710                 if (tp->phy_otp == 0)
16711                         tp->phy_otp = TG3_OTP_DEFAULT;
16712         }
16713
16714         if (tg3_flag(tp, CPMU_PRESENT))
16715                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16716         else
16717                 tp->mi_mode = MAC_MI_MODE_BASE;
16718
16719         tp->coalesce_mode = 0;
16720         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16721             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16722                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16723
16724         /* Set these bits to enable statistics workaround. */
16725         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16726             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16727             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16728             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16729                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16730                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16731         }
16732
16733         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16734             tg3_asic_rev(tp) == ASIC_REV_57780)
16735                 tg3_flag_set(tp, USE_PHYLIB);
16736
16737         err = tg3_mdio_init(tp);
16738         if (err)
16739                 return err;
16740
16741         /* Initialize data/descriptor byte/word swapping. */
16742         val = tr32(GRC_MODE);
16743         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16744             tg3_asic_rev(tp) == ASIC_REV_5762)
16745                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16746                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16747                         GRC_MODE_B2HRX_ENABLE |
16748                         GRC_MODE_HTX2B_ENABLE |
16749                         GRC_MODE_HOST_STACKUP);
16750         else
16751                 val &= GRC_MODE_HOST_STACKUP;
16752
16753         tw32(GRC_MODE, val | tp->grc_mode);
16754
16755         tg3_switch_clocks(tp);
16756
16757         /* Clear this out for sanity. */
16758         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16759
16760         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16761         tw32(TG3PCI_REG_BASE_ADDR, 0);
16762
16763         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16764                               &pci_state_reg);
16765         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16766             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16767                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16768                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16769                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16770                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16771                         void __iomem *sram_base;
16772
16773                         /* Write some dummy words into the SRAM status block
16774                          * area, see if it reads back correctly.  If the return
16775                          * value is bad, force enable the PCIX workaround.
16776                          */
16777                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16778
16779                         writel(0x00000000, sram_base);
16780                         writel(0x00000000, sram_base + 4);
16781                         writel(0xffffffff, sram_base + 4);
16782                         if (readl(sram_base) != 0x00000000)
16783                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16784                 }
16785         }
16786
16787         udelay(50);
16788         tg3_nvram_init(tp);
16789
16790         /* If the device has NVRAM, there is no need to load patch firmware */
16791         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16792             !tg3_flag(tp, NO_NVRAM))
16793                 tp->fw_needed = NULL;
16794
16795         grc_misc_cfg = tr32(GRC_MISC_CFG);
16796         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16797
16798         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16799             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16800              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16801                 tg3_flag_set(tp, IS_5788);
16802
16803         if (!tg3_flag(tp, IS_5788) &&
16804             tg3_asic_rev(tp) != ASIC_REV_5700)
16805                 tg3_flag_set(tp, TAGGED_STATUS);
16806         if (tg3_flag(tp, TAGGED_STATUS)) {
16807                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16808                                       HOSTCC_MODE_CLRTICK_TXBD);
16809
16810                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16811                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16812                                        tp->misc_host_ctrl);
16813         }
16814
16815         /* Preserve the APE MAC_MODE bits */
16816         if (tg3_flag(tp, ENABLE_APE))
16817                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16818         else
16819                 tp->mac_mode = 0;
16820
16821         if (tg3_10_100_only_device(tp, ent))
16822                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16823
16824         err = tg3_phy_probe(tp);
16825         if (err) {
16826                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16827                 /* Do not return yet; clean up MDIO and let err propagate. */
16828                 tg3_mdio_fini(tp);
16829         }
16830
16831         tg3_read_vpd(tp);
16832         tg3_read_fw_ver(tp);
16833
16834         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16835                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16836         } else {
16837                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16838                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16839                 else
16840                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16841         }
16842
16843         /* 5700 {AX,BX} chips have a broken status block link
16844          * change bit implementation, so we must use the
16845          * status register in those cases.
16846          */
16847         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16848                 tg3_flag_set(tp, USE_LINKCHG_REG);
16849         else
16850                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16851
16852         /* The led_ctrl is set during tg3_phy_probe; here we might
16853          * have to force the link status polling mechanism based
16854          * upon subsystem IDs.
16855          */
16856         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16857             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16858             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16859                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16860                 tg3_flag_set(tp, USE_LINKCHG_REG);
16861         }
16862
16863         /* For all SERDES we poll the MAC status register. */
16864         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16865                 tg3_flag_set(tp, POLL_SERDES);
16866         else
16867                 tg3_flag_clear(tp, POLL_SERDES);
16868
16869         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16870                 tg3_flag_set(tp, POLL_CPMU_LINK);
16871
16872         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16873         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
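              /* On the 5701 in PCI-X mode the usual NET_IP_ALIGN receive
               * offset is dropped (the chip presumably cannot DMA to the
               * 2-byte-shifted address); without efficient unaligned access
               * the copy threshold is maxed out so every packet is copied.
               */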
16874         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16875             tg3_flag(tp, PCIX_MODE)) {
16876                 tp->rx_offset = NET_SKB_PAD;
16877 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16878                 tp->rx_copy_thresh = ~(u16)0;
16879 #endif
16880         }
16881
16882         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16883         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16884         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16885
16886         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16887
16888         /* Increment the rx prod index on the rx std ring by at most
16889          * 8 for these chips to work around hw errata.
16890          */
16891         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16892             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16893             tg3_asic_rev(tp) == ASIC_REV_5755)
16894                 tp->rx_std_max_post = 8;
16895
16896         if (tg3_flag(tp, ASPM_WORKAROUND))
16897                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16898                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16899
16900         return err;
16901 }
16902
16903 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16904 {
16905         u32 hi, lo, mac_offset;
16906         int addr_ok = 0;
16907         int err;
16908
16909         if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16910                 return 0;
16911
16912         if (tg3_flag(tp, IS_SSB_CORE)) {
16913                 err = ssb_gige_get_macaddr(tp->pdev, addr);
16914                 if (!err && is_valid_ether_addr(addr))
16915                         return 0;
16916         }
16917
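              /* Pick the NVRAM offset of this function's MAC address:
               * 0x7c by default, 0xcc for the second MAC on dual-MAC
               * 5704/5780-class parts, per-function offsets on 5717-plus
               * devices, and 0x10 on the 5906.
               */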
16918         mac_offset = 0x7c;
16919         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16920             tg3_flag(tp, 5780_CLASS)) {
16921                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16922                         mac_offset = 0xcc;
16923                 if (tg3_nvram_lock(tp))
16924                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16925                 else
16926                         tg3_nvram_unlock(tp);
16927         } else if (tg3_flag(tp, 5717_PLUS)) {
16928                 if (tp->pci_fn & 1)
16929                         mac_offset = 0xcc;
16930                 if (tp->pci_fn > 1)
16931                         mac_offset += 0x18c;
16932         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16933                 mac_offset = 0x10;
16934
16935         /* First try to get it from MAC address mailbox. */
16936         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
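              /* 0x484b is ASCII "HK", evidently the bootcode's signature
               * for a mailbox that holds a valid MAC address.
               */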
16937         if ((hi >> 16) == 0x484b) {
16938                 addr[0] = (hi >>  8) & 0xff;
16939                 addr[1] = (hi >>  0) & 0xff;
16940
16941                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16942                 addr[2] = (lo >> 24) & 0xff;
16943                 addr[3] = (lo >> 16) & 0xff;
16944                 addr[4] = (lo >>  8) & 0xff;
16945                 addr[5] = (lo >>  0) & 0xff;
16946
16947                 /* Some old bootcode may report a 0 MAC address in SRAM */
16948                 addr_ok = is_valid_ether_addr(addr);
16949         }
16950         if (!addr_ok) {
16951                 /* Next, try NVRAM. */
16952                 if (!tg3_flag(tp, NO_NVRAM) &&
16953                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16954                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16955                         memcpy(&addr[0], ((char *)&hi) + 2, 2);
16956                         memcpy(&addr[2], (char *)&lo, sizeof(lo));
16957                 }
16958                 /* Finally just fetch it out of the MAC control regs. */
16959                 else {
16960                         hi = tr32(MAC_ADDR_0_HIGH);
16961                         lo = tr32(MAC_ADDR_0_LOW);
16962
16963                         addr[5] = lo & 0xff;
16964                         addr[4] = (lo >> 8) & 0xff;
16965                         addr[3] = (lo >> 16) & 0xff;
16966                         addr[2] = (lo >> 24) & 0xff;
16967                         addr[1] = hi & 0xff;
16968                         addr[0] = (hi >> 8) & 0xff;
16969                 }
16970         }
16971
16972         if (!is_valid_ether_addr(addr))
16973                 return -EINVAL;
16974         return 0;
16975 }
16976
16977 #define BOUNDARY_SINGLE_CACHELINE       1
16978 #define BOUNDARY_MULTI_CACHELINE        2
16979
16980 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16981 {
16982         int cacheline_size;
16983         u8 byte;
16984         int goal;
16985
16986         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16987         if (byte == 0)
16988                 cacheline_size = 1024;
16989         else
16990                 cacheline_size = (int) byte * 4;
16991
16992         /* On 5703 and later chips, the boundary bits have no
16993          * effect.
16994          */
16995         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16996             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16997             !tg3_flag(tp, PCI_EXPRESS))
16998                 goto out;
16999
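              /* Select the boundary goal by architecture: multi-cache-line
               * boundaries on PPC64/IA64/PARISC, single-cache-line on
               * SPARC64/Alpha, and no constraint everywhere else.
               */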
17000 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17001         goal = BOUNDARY_MULTI_CACHELINE;
17002 #else
17003 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17004         goal = BOUNDARY_SINGLE_CACHELINE;
17005 #else
17006         goal = 0;
17007 #endif
17008 #endif
17009
17010         if (tg3_flag(tp, 57765_PLUS)) {
17011                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17012                 goto out;
17013         }
17014
17015         if (!goal)
17016                 goto out;
17017
17018         /* PCI controllers on most RISC systems tend to disconnect
17019          * when a device tries to burst across a cache-line boundary.
17020          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17021          *
17022          * Unfortunately, for PCI-E there are only limited
17023          * write-side controls for this, and thus for reads
17024          * we will still get the disconnects.  We'll also waste
17025          * these PCI cycles for both read and write for chips
17026          * other than 5700 and 5701, which do not implement the
17027          * boundary bits.
17028          */
17029         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17030                 switch (cacheline_size) {
17031                 case 16:
17032                 case 32:
17033                 case 64:
17034                 case 128:
17035                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17036                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17037                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17038                         } else {
17039                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17040                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17041                         }
17042                         break;
17043
17044                 case 256:
17045                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17046                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17047                         break;
17048
17049                 default:
17050                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17051                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17052                         break;
17053                 }
17054         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17055                 switch (cacheline_size) {
17056                 case 16:
17057                 case 32:
17058                 case 64:
17059                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17060                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17061                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17062                                 break;
17063                         }
17064                         fallthrough;
17065                 case 128:
17066                 default:
17067                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17068                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17069                         break;
17070                 }
17071         } else {
17072                 switch (cacheline_size) {
17073                 case 16:
17074                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17075                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17076                                         DMA_RWCTRL_WRITE_BNDRY_16);
17077                                 break;
17078                         }
17079                         fallthrough;
17080                 case 32:
17081                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17082                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17083                                         DMA_RWCTRL_WRITE_BNDRY_32);
17084                                 break;
17085                         }
17086                         fallthrough;
17087                 case 64:
17088                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17089                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17090                                         DMA_RWCTRL_WRITE_BNDRY_64);
17091                                 break;
17092                         }
17093                         fallthrough;
17094                 case 128:
17095                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17096                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17097                                         DMA_RWCTRL_WRITE_BNDRY_128);
17098                                 break;
17099                         }
17100                         fallthrough;
17101                 case 256:
17102                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17103                                 DMA_RWCTRL_WRITE_BNDRY_256);
17104                         break;
17105                 case 512:
17106                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17107                                 DMA_RWCTRL_WRITE_BNDRY_512);
17108                         break;
17109                 case 1024:
17110                 default:
17111                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17112                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17113                         break;
17114                 }
17115         }
17116
17117 out:
17118         return val;
17119 }
17120
17121 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17122                            int size, bool to_device)
17123 {
17124         struct tg3_internal_buffer_desc test_desc;
17125         u32 sram_dma_descs;
17126         int i, ret;
17127
17128         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17129
17130         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17131         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17132         tw32(RDMAC_STATUS, 0);
17133         tw32(WDMAC_STATUS, 0);
17134
17135         tw32(BUFMGR_MODE, 0);
17136         tw32(FTQ_RESET, 0);
17137
17138         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17139         test_desc.addr_lo = buf_dma & 0xffffffff;
17140         test_desc.nic_mbuf = 0x00002100;
17141         test_desc.len = size;
17142
17143         /*
17144          * HP ZX1 systems were seeing test failures for 5701 cards running
17145          * at 33MHz the *second* time the tg3 driver was loaded after an
17146          * initial scan.
17147          *
17148          * Broadcom tells me:
17149          *   ...the DMA engine is connected to the GRC block and a DMA
17150          *   reset may affect the GRC block in some unpredictable way...
17151          *   The behavior of resets to individual blocks has not been tested.
17152          *
17153          * Broadcom noted the GRC reset will also reset all sub-components.
17154          */
17155         if (to_device) {
17156                 test_desc.cqid_sqid = (13 << 8) | 2;
17157
17158                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17159                 udelay(40);
17160         } else {
17161                 test_desc.cqid_sqid = (16 << 8) | 7;
17162
17163                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17164                 udelay(40);
17165         }
17166         test_desc.flags = 0x00000005;
17167
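              /* Copy the test descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI config-space memory window.
               */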
17168         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17169                 u32 val;
17170
17171                 val = *(((u32 *)&test_desc) + i);
17172                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17173                                        sram_dma_descs + (i * sizeof(u32)));
17174                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17175         }
17176         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17177
17178         if (to_device)
17179                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17180         else
17181                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17182
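              /* Poll the completion FIFO for up to 4 ms (40 x 100 usec),
               * waiting for the descriptor address to come back.
               */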
17183         ret = -ENODEV;
17184         for (i = 0; i < 40; i++) {
17185                 u32 val;
17186
17187                 if (to_device)
17188                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17189                 else
17190                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17191                 if ((val & 0xffff) == sram_dma_descs) {
17192                         ret = 0;
17193                         break;
17194                 }
17195
17196                 udelay(100);
17197         }
17198
17199         return ret;
17200 }
17201
17202 #define TEST_BUFFER_SIZE        0x2000
17203
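/* Host bridges known to expose the 5700/5701 write DMA bug even though the
 * DMA test below passes; they get the 16-byte write boundary regardless.
 */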
17204 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17205         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17206         { },
17207 };
17208
17209 static int tg3_test_dma(struct tg3 *tp)
17210 {
17211         dma_addr_t buf_dma;
17212         u32 *buf, saved_dma_rwctrl;
17213         int ret = 0;
17214
17215         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17216                                  &buf_dma, GFP_KERNEL);
17217         if (!buf) {
17218                 ret = -ENOMEM;
17219                 goto out_nofree;
17220         }
17221
17222         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17223                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17224
17225         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17226
17227         if (tg3_flag(tp, 57765_PLUS))
17228                 goto out;
17229
17230         if (tg3_flag(tp, PCI_EXPRESS)) {
17231                 /* DMA read watermark not used on PCIE */
17232                 tp->dma_rwctrl |= 0x00180000;
17233         } else if (!tg3_flag(tp, PCIX_MODE)) {
17234                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17235                     tg3_asic_rev(tp) == ASIC_REV_5750)
17236                         tp->dma_rwctrl |= 0x003f0000;
17237                 else
17238                         tp->dma_rwctrl |= 0x003f000f;
17239         } else {
17240                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17241                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17242                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17243                         u32 read_water = 0x7;
17244
17245                         /* If the 5704 is behind the EPB bridge, we can
17246                          * do the less restrictive ONE_DMA workaround for
17247                          * better performance.
17248                          */
17249                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17250                             tg3_asic_rev(tp) == ASIC_REV_5704)
17251                                 tp->dma_rwctrl |= 0x8000;
17252                         else if (ccval == 0x6 || ccval == 0x7)
17253                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17254
17255                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17256                                 read_water = 4;
17257                         /* Set bit 23 to enable PCIX hw bug fix */
17258                         tp->dma_rwctrl |=
17259                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17260                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17261                                 (1 << 23);
17262                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17263                         /* 5780 always in PCIX mode */
17264                         tp->dma_rwctrl |= 0x00144000;
17265                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17266                         /* 5714 always in PCIX mode */
17267                         tp->dma_rwctrl |= 0x00148000;
17268                 } else {
17269                         tp->dma_rwctrl |= 0x001b000f;
17270                 }
17271         }
17272         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17273                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17274
17275         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17276             tg3_asic_rev(tp) == ASIC_REV_5704)
17277                 tp->dma_rwctrl &= 0xfffffff0;
17278
17279         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17280             tg3_asic_rev(tp) == ASIC_REV_5701) {
17281                 /* Remove this if it causes problems for some boards. */
17282                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17283
17284                 /* On 5700/5701 chips, we need to set this bit.
17285                  * Otherwise the chip will issue cacheline transactions
17286                  * to streamable DMA memory with not all the byte
17287                  * enables turned on.  This is an error on several
17288                  * RISC PCI controllers, in particular sparc64.
17289                  *
17290                  * On 5703/5704 chips, this bit has been reassigned
17291                  * a different meaning.  In particular, it is used
17292                  * on those chips to enable a PCI-X workaround.
17293                  */
17294                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17295         }
17296
17297         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17298
17299
17300         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17301             tg3_asic_rev(tp) != ASIC_REV_5701)
17302                 goto out;
17303
17304         /* It is best to perform the DMA test with the maximum write burst
17305          * size to expose the 5700/5701 write DMA bug.
17306          */
17307         saved_dma_rwctrl = tp->dma_rwctrl;
17308         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17309         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17310
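              /* Write an incrementing pattern to the chip, read it back and
               * verify it.  On corruption, retry once with a 16-byte write
               * boundary before declaring the device unusable.
               */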
17311         while (1) {
17312                 u32 *p = buf, i;
17313
17314                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17315                         p[i] = i;
17316
17317                 /* Send the buffer to the chip. */
17318                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17319                 if (ret) {
17320                         dev_err(&tp->pdev->dev,
17321                                 "%s: Buffer write failed. err = %d\n",
17322                                 __func__, ret);
17323                         break;
17324                 }
17325
17326                 /* Now read it back. */
17327                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17328                 if (ret) {
17329                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17330                                 "err = %d\n", __func__, ret);
17331                         break;
17332                 }
17333
17334                 /* Verify it. */
17335                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17336                         if (p[i] == i)
17337                                 continue;
17338
17339                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17340                             DMA_RWCTRL_WRITE_BNDRY_16) {
17341                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17342                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17343                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17344                                 break;
17345                         } else {
17346                                 dev_err(&tp->pdev->dev,
17347                                         "%s: Buffer corrupted on read back! "
17348                                         "(%d != %d)\n", __func__, p[i], i);
17349                                 ret = -ENODEV;
17350                                 goto out;
17351                         }
17352                 }
17353
17354                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17355                         /* Success. */
17356                         ret = 0;
17357                         break;
17358                 }
17359         }
17360         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17361             DMA_RWCTRL_WRITE_BNDRY_16) {
17362                 /* DMA test passed without adjusting DMA boundary;
17363                  * now look for chipsets that are known to expose the
17364                  * DMA bug without failing the test.
17365                  */
17366                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17367                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17368                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17369                 } else {
17370                         /* Safe to use the calculated DMA boundary. */
17371                         tp->dma_rwctrl = saved_dma_rwctrl;
17372                 }
17373
17374                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17375         }
17376
17377 out:
17378         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17379 out_nofree:
17380         return ret;
17381 }
17382
17383 static void tg3_init_bufmgr_config(struct tg3 *tp)
17384 {
17385         if (tg3_flag(tp, 57765_PLUS)) {
17386                 tp->bufmgr_config.mbuf_read_dma_low_water =
17387                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17388                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17389                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17390                 tp->bufmgr_config.mbuf_high_water =
17391                         DEFAULT_MB_HIGH_WATER_57765;
17392
17393                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17394                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17395                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17396                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17397                 tp->bufmgr_config.mbuf_high_water_jumbo =
17398                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17399         } else if (tg3_flag(tp, 5705_PLUS)) {
17400                 tp->bufmgr_config.mbuf_read_dma_low_water =
17401                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17402                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17403                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17404                 tp->bufmgr_config.mbuf_high_water =
17405                         DEFAULT_MB_HIGH_WATER_5705;
17406                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17407                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17408                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17409                         tp->bufmgr_config.mbuf_high_water =
17410                                 DEFAULT_MB_HIGH_WATER_5906;
17411                 }
17412
17413                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17414                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17415                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17416                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17417                 tp->bufmgr_config.mbuf_high_water_jumbo =
17418                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17419         } else {
17420                 tp->bufmgr_config.mbuf_read_dma_low_water =
17421                         DEFAULT_MB_RDMA_LOW_WATER;
17422                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17423                         DEFAULT_MB_MACRX_LOW_WATER;
17424                 tp->bufmgr_config.mbuf_high_water =
17425                         DEFAULT_MB_HIGH_WATER;
17426
17427                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17428                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17429                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17430                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17431                 tp->bufmgr_config.mbuf_high_water_jumbo =
17432                         DEFAULT_MB_HIGH_WATER_JUMBO;
17433         }
17434
17435         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17436         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17437 }
17438
17439 static char *tg3_phy_string(struct tg3 *tp)
17440 {
17441         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17442         case TG3_PHY_ID_BCM5400:        return "5400";
17443         case TG3_PHY_ID_BCM5401:        return "5401";
17444         case TG3_PHY_ID_BCM5411:        return "5411";
17445         case TG3_PHY_ID_BCM5701:        return "5701";
17446         case TG3_PHY_ID_BCM5703:        return "5703";
17447         case TG3_PHY_ID_BCM5704:        return "5704";
17448         case TG3_PHY_ID_BCM5705:        return "5705";
17449         case TG3_PHY_ID_BCM5750:        return "5750";
17450         case TG3_PHY_ID_BCM5752:        return "5752";
17451         case TG3_PHY_ID_BCM5714:        return "5714";
17452         case TG3_PHY_ID_BCM5780:        return "5780";
17453         case TG3_PHY_ID_BCM5755:        return "5755";
17454         case TG3_PHY_ID_BCM5787:        return "5787";
17455         case TG3_PHY_ID_BCM5784:        return "5784";
17456         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17457         case TG3_PHY_ID_BCM5906:        return "5906";
17458         case TG3_PHY_ID_BCM5761:        return "5761";
17459         case TG3_PHY_ID_BCM5718C:       return "5718C";
17460         case TG3_PHY_ID_BCM5718S:       return "5718S";
17461         case TG3_PHY_ID_BCM57765:       return "57765";
17462         case TG3_PHY_ID_BCM5719C:       return "5719C";
17463         case TG3_PHY_ID_BCM5720C:       return "5720C";
17464         case TG3_PHY_ID_BCM5762:        return "5762C";
17465         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17466         case 0:                 return "serdes";
17467         default:                return "unknown";
17468         }
17469 }
17470
17471 static char *tg3_bus_string(struct tg3 *tp, char *str)
17472 {
17473         if (tg3_flag(tp, PCI_EXPRESS)) {
17474                 strcpy(str, "PCI Express");
17475                 return str;
17476         } else if (tg3_flag(tp, PCIX_MODE)) {
17477                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17478
17479                 strcpy(str, "PCIX:");
17480
17481                 if ((clock_ctrl == 7) ||
17482                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17483                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17484                         strcat(str, "133MHz");
17485                 else if (clock_ctrl == 0)
17486                         strcat(str, "33MHz");
17487                 else if (clock_ctrl == 2)
17488                         strcat(str, "50MHz");
17489                 else if (clock_ctrl == 4)
17490                         strcat(str, "66MHz");
17491                 else if (clock_ctrl == 6)
17492                         strcat(str, "100MHz");
17493         } else {
17494                 strcpy(str, "PCI:");
17495                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17496                         strcat(str, "66MHz");
17497                 else
17498                         strcat(str, "33MHz");
17499         }
17500         if (tg3_flag(tp, PCI_32BIT))
17501                 strcat(str, ":32-bit");
17502         else
17503                 strcat(str, ":64-bit");
17504         return str;
17505 }
17506
17507 static void tg3_init_coal(struct tg3 *tp)
17508 {
17509         struct ethtool_coalesce *ec = &tp->coal;
17510
17511         memset(ec, 0, sizeof(*ec));
17512         ec->cmd = ETHTOOL_GCOALESCE;
17513         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17514         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17515         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17516         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17517         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17518         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17519         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17520         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17521         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17522
17523         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17524                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17525                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17526                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17527                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17528                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17529         }
17530
17531         if (tg3_flag(tp, 5705_PLUS)) {
17532                 ec->rx_coalesce_usecs_irq = 0;
17533                 ec->tx_coalesce_usecs_irq = 0;
17534                 ec->stats_block_coalesce_usecs = 0;
17535         }
17536 }
17537
17538 static int tg3_init_one(struct pci_dev *pdev,
17539                                   const struct pci_device_id *ent)
17540 {
17541         struct net_device *dev;
17542         struct tg3 *tp;
17543         int i, err;
17544         u32 sndmbx, rcvmbx, intmbx;
17545         char str[40];
17546         u64 dma_mask, persist_dma_mask;
17547         netdev_features_t features = 0;
17548         u8 addr[ETH_ALEN] __aligned(2);
17549
17550         err = pci_enable_device(pdev);
17551         if (err) {
17552                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17553                 return err;
17554         }
17555
17556         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17557         if (err) {
17558                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17559                 goto err_out_disable_pdev;
17560         }
17561
17562         pci_set_master(pdev);
17563
17564         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17565         if (!dev) {
17566                 err = -ENOMEM;
17567                 goto err_out_free_res;
17568         }
17569
17570         SET_NETDEV_DEV(dev, &pdev->dev);
17571
17572         tp = netdev_priv(dev);
17573         tp->pdev = pdev;
17574         tp->dev = dev;
17575         tp->rx_mode = TG3_DEF_RX_MODE;
17576         tp->tx_mode = TG3_DEF_TX_MODE;
17577         tp->irq_sync = 1;
17578         tp->pcierr_recovery = false;
17579
17580         if (tg3_debug > 0)
17581                 tp->msg_enable = tg3_debug;
17582         else
17583                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17584
17585         if (pdev_is_ssb_gige_core(pdev)) {
17586                 tg3_flag_set(tp, IS_SSB_CORE);
17587                 if (ssb_gige_must_flush_posted_writes(pdev))
17588                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17589                 if (ssb_gige_one_dma_at_once(pdev))
17590                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17591                 if (ssb_gige_have_roboswitch(pdev)) {
17592                         tg3_flag_set(tp, USE_PHYLIB);
17593                         tg3_flag_set(tp, ROBOSWITCH);
17594                 }
17595                 if (ssb_gige_is_rgmii(pdev))
17596                         tg3_flag_set(tp, RGMII_MODE);
17597         }
17598
17599         /* The word/byte swap controls here govern register access byte
17600          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17601          * setting below.
17602          */
17603         tp->misc_host_ctrl =
17604                 MISC_HOST_CTRL_MASK_PCI_INT |
17605                 MISC_HOST_CTRL_WORD_SWAP |
17606                 MISC_HOST_CTRL_INDIR_ACCESS |
17607                 MISC_HOST_CTRL_PCISTATE_RW;
17608
17609         /* The NONFRM (non-frame) byte/word swap controls take effect
17610          * on descriptor entries, i.e., anything that isn't packet data.
17611          *
17612          * The StrongARM chips on the board (one for tx, one for rx)
17613          * are running in big-endian mode.
17614          */
17615         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17616                         GRC_MODE_WSWAP_NONFRM_DATA);
17617 #ifdef __BIG_ENDIAN
17618         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17619 #endif
17620         spin_lock_init(&tp->lock);
17621         spin_lock_init(&tp->indirect_lock);
17622         INIT_WORK(&tp->reset_task, tg3_reset_task);
17623
17624         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17625         if (!tp->regs) {
17626                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17627                 err = -ENOMEM;
17628                 goto err_out_free_dev;
17629         }
17630
17631         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17632             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17633             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17634             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17635             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17636             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17637             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17638             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17639             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17640             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17641             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17642             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17643             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17644             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17645             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17646                 tg3_flag_set(tp, ENABLE_APE);
17647                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17648                 if (!tp->aperegs) {
17649                         dev_err(&pdev->dev,
17650                                 "Cannot map APE registers, aborting\n");
17651                         err = -ENOMEM;
17652                         goto err_out_iounmap;
17653                 }
17654         }
17655
17656         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17657         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17658
17659         dev->ethtool_ops = &tg3_ethtool_ops;
17660         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17661         dev->netdev_ops = &tg3_netdev_ops;
17662         dev->irq = pdev->irq;
17663
17664         err = tg3_get_invariants(tp, ent);
17665         if (err) {
17666                 dev_err(&pdev->dev,
17667                         "Problem fetching invariants of chip, aborting\n");
17668                 goto err_out_apeunmap;
17669         }
17670
17671         /* The EPB bridge inside 5714, 5715, and 5780 and any
17672          * device behind the EPB cannot support DMA addresses > 40-bit.
17673          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17674          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17675          * do DMA address check in tg3_start_xmit().
17676          */
17677         if (tg3_flag(tp, IS_5788))
17678                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17679         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17680                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17681 #ifdef CONFIG_HIGHMEM
17682                 dma_mask = DMA_BIT_MASK(64);
17683 #endif
17684         } else
17685                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17686
17687         /* Configure DMA attributes. */
17688         if (dma_mask > DMA_BIT_MASK(32)) {
17689                 err = dma_set_mask(&pdev->dev, dma_mask);
17690                 if (!err) {
17691                         features |= NETIF_F_HIGHDMA;
17692                         err = dma_set_coherent_mask(&pdev->dev,
17693                                                     persist_dma_mask);
17694                         if (err < 0) {
17695                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17696                                         "DMA for consistent allocations\n");
17697                                 goto err_out_apeunmap;
17698                         }
17699                 }
17700         }
17701         if (err || dma_mask == DMA_BIT_MASK(32)) {
17702                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17703                 if (err) {
17704                         dev_err(&pdev->dev,
17705                                 "No usable DMA configuration, aborting\n");
17706                         goto err_out_apeunmap;
17707                 }
17708         }
17709
17710         tg3_init_bufmgr_config(tp);
17711
17712         /* 5700 B0 chips do not support checksumming correctly due
17713          * to hardware bugs.
17714          */
17715         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17716                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17717
17718                 if (tg3_flag(tp, 5755_PLUS))
17719                         features |= NETIF_F_IPV6_CSUM;
17720         }
17721
17722         /* TSO is on by default on chips that support hardware TSO.
17723          * Firmware TSO on older chips gives lower performance, so it
17724          * is off by default, but can be enabled using ethtool.
17725          */
17726         if ((tg3_flag(tp, HW_TSO_1) ||
17727              tg3_flag(tp, HW_TSO_2) ||
17728              tg3_flag(tp, HW_TSO_3)) &&
17729             (features & NETIF_F_IP_CSUM))
17730                 features |= NETIF_F_TSO;
17731         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17732                 if (features & NETIF_F_IPV6_CSUM)
17733                         features |= NETIF_F_TSO6;
17734                 if (tg3_flag(tp, HW_TSO_3) ||
17735                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17736                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17737                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17738                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17739                     tg3_asic_rev(tp) == ASIC_REV_57780)
17740                         features |= NETIF_F_TSO_ECN;
17741         }
17742
17743         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17744                          NETIF_F_HW_VLAN_CTAG_RX;
17745         dev->vlan_features |= features;
17746
17747         /*
17748          * Add loopback capability only for a subset of devices that support
17749          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17750          * loopback for the remaining devices.
17751          */
17752         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17753             !tg3_flag(tp, CPMU_PRESENT))
17754                 /* Add the loopback capability */
17755                 features |= NETIF_F_LOOPBACK;
17756
17757         dev->hw_features |= features;
17758         dev->priv_flags |= IFF_UNICAST_FLT;
17759
17760         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17761         dev->min_mtu = TG3_MIN_MTU;
17762         dev->max_mtu = TG3_MAX_MTU(tp);
17763
17764         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17765             !tg3_flag(tp, TSO_CAPABLE) &&
17766             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17767                 tg3_flag_set(tp, MAX_RXPEND_64);
17768                 tp->rx_pending = 63;
17769         }
17770
17771         err = tg3_get_device_address(tp, addr);
17772         if (err) {
17773                 dev_err(&pdev->dev,
17774                         "Could not obtain valid ethernet address, aborting\n");
17775                 goto err_out_apeunmap;
17776         }
17777         eth_hw_addr_set(dev, addr);
17778
17779         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17780         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17781         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
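              /* Walk every possible interrupt vector, assigning each NAPI
               * context its interrupt, consumer, and producer mailbox
               * registers.
               */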
17782         for (i = 0; i < tp->irq_max; i++) {
17783                 struct tg3_napi *tnapi = &tp->napi[i];
17784
17785                 tnapi->tp = tp;
17786                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17787
17788                 tnapi->int_mbox = intmbx;
17789                 if (i <= 4)
17790                         intmbx += 0x8;
17791                 else
17792                         intmbx += 0x4;
17793
17794                 tnapi->consmbox = rcvmbx;
17795                 tnapi->prodmbox = sndmbx;
17796
17797                 if (i)
17798                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17799                 else
17800                         tnapi->coal_now = HOSTCC_MODE_NOW;
17801
17802                 if (!tg3_flag(tp, SUPPORT_MSIX))
17803                         break;
17804
17805                 /*
17806                  * If we support MSIX, we'll be using RSS.  If we're using
17807                  * RSS, the first vector only handles link interrupts and the
17808                  * remaining vectors handle rx and tx interrupts.  Reuse the
17809          * mailbox values for the next iteration.  The values we set up
17810          * above are still useful for the single-vectored mode.
17811                  */
17812                 if (!i)
17813                         continue;
17814
17815                 rcvmbx += 0x8;
17816
17817                 if (sndmbx & 0x4)
17818                         sndmbx -= 0x4;
17819                 else
17820                         sndmbx += 0xc;
17821         }
17822
17823         /*
17824          * Reset the chip in case a UNDI or EFI driver did not shut down
17825          * DMA.  The DMA self test will enable WDMAC, and we'll see
17826          * (spurious) pending DMA on the PCI bus at that point.
17827          */
17828         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17829             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17830                 tg3_full_lock(tp, 0);
17831                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17832                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17833                 tg3_full_unlock(tp);
17834         }
17835
17836         err = tg3_test_dma(tp);
17837         if (err) {
17838                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17839                 goto err_out_apeunmap;
17840         }
17841
17842         tg3_init_coal(tp);
17843
17844         pci_set_drvdata(pdev, dev);
17845
17846         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17847             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17848             tg3_asic_rev(tp) == ASIC_REV_5762)
17849                 tg3_flag_set(tp, PTP_CAPABLE);
17850
17851         tg3_timer_init(tp);
17852
17853         tg3_carrier_off(tp);
17854
17855         err = register_netdev(dev);
17856         if (err) {
17857                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17858                 goto err_out_apeunmap;
17859         }
17860
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tg3_ptp_init(tp);
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}

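/* Tear down everything tg3_init_one() set up, in reverse order. */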
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
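/*
 * System suspend: quiesce the driver, halt the chip, and prepare it
 * for power-down (arming wake-on-LAN if configured).  If that
 * preparation fails, restart the hardware so the device stays usable.
 */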
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

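/*
 * System resume: the reverse of tg3_suspend().  Reattach the device
 * and bring the hardware back up from scratch.
 */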
static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

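/*
 * SIMPLE_DEV_PM_OPS() wires tg3_suspend()/tg3_resume() into the
 * system-sleep callbacks when CONFIG_PM_SLEEP is set; otherwise
 * tg3_pm_ops is empty, matching the #ifdef around the handlers above.
 */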
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

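/*
 * Shutdown/reboot hook: detach and close the interface; on a real
 * power-off also drop the chip into its low-power state (arming
 * wake-on-LAN if it is configured).
 */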
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* This could be a second call, or the netdev may not be up yet. */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/*
	 * Only a frozen channel is recovered; mark recovery in progress.
	 * A permanent failure is handled in the done path below.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Make sure the reset task cannot run concurrently. */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
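	/*
	 * A permanent failure means the device is not coming back: shut
	 * it down.  Otherwise disable it and report that a slot reset
	 * is needed.
	 */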
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

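	/*
	 * Restore the config space saved at probe time, then save it
	 * again: pci_restore_state() clears the saved-state flag, and
	 * a future error would need another restore.
	 */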
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

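/*
 * PCI error recovery calls these in order: error_detected() when the
 * error is reported, slot_reset() after the link has been reset, and
 * resume() once traffic may flow again.
 */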
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

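/*
 * module_pci_driver() supplies the module_init()/module_exit() pair
 * that registers and unregisters tg3_driver.
 */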
module_pci_driver(tg3_driver);