GNU Linux-libre 6.8.9-gnu
[releases.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12 /*(DEBLOBBED)*/
13
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/sched/signal.h>
20 #include <linux/types.h>
21 #include <linux/compiler.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
24 #include <linux/in.h>
25 #include <linux/interrupt.h>
26 #include <linux/ioport.h>
27 #include <linux/pci.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/ethtool.h>
32 #include <linux/mdio.h>
33 #include <linux/mii.h>
34 #include <linux/phy.h>
35 #include <linux/brcmphy.h>
36 #include <linux/if.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
44 #include <linux/ssb/ssb_driver_gige.h>
45 #include <linux/hwmon.h>
46 #include <linux/hwmon-sysfs.h>
47 #include <linux/crc32poly.h>
48
49 #include <net/checksum.h>
50 #include <net/gso.h>
51 #include <net/ip.h>
52
53 #include <linux/io.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
56
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Test a device flag bit in @bits (tp->tg3_flags).  The enum parameter
 * gives compile-time type checking that a plain test_bit() call lacks.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
71
/* Atomically set a device flag bit; type-checked wrapper around set_bit(). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
76
/* Atomically clear a device flag bit; type-checked wrapper around clear_bit(). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
81
82 #define tg3_flag(tp, flag)                              \
83         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag)                          \
85         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag)                        \
87         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 /* DO NOT UPDATE TG3_*_NUM defines */
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     137
93
94 #define RESET_KIND_SHUTDOWN     0
95 #define RESET_KIND_INIT         1
96 #define RESET_KIND_SUSPEND      2
97
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
111
112 /* length of time before we decide the hardware is borked,
113  * and dev->tx_timeout() should be called to fix the problem
114  */
115
116 #define TG3_TX_TIMEOUT                  (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU                     ETH_ZLEN
120 #define TG3_MAX_MTU(tp) \
121         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124  * You can't change the ring sizes, but you can change where you place
125  * them in the NIC onboard memory.
126  */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING         200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
135
136 /* Do not place this n-ring entries value into the tp struct itself,
137  * we really want to expose these constants to GCC so that modulo et
138  * al.  operations are done with shifts and masks instead of with
139  * hw multiply/modulo instructions.  Another solution would be to
140  * replace things like '% foo' with '& (foo - 1)'.
141  */
142
143 #define TG3_TX_RING_SIZE                512
144 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
145
146 #define TG3_RX_STD_RING_BYTES(tp) \
147         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
148 #define TG3_RX_JMB_RING_BYTES(tp) \
149         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
150 #define TG3_RX_RCB_RING_BYTES(tp) \
151         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
152 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
153                                  TG3_TX_RING_SIZE)
154 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
155
156 #define TG3_DMA_BYTE_ENAB               64
157
158 #define TG3_RX_STD_DMA_SZ               1536
159 #define TG3_RX_JMB_DMA_SZ               9046
160
161 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
162
163 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
164 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
165
166 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
167         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
168
169 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
171
172 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
173  * that are at least dword aligned when used in PCIX mode.  The driver
174  * works around this bug by double copying the packet.  This workaround
175  * is built into the normal double copy length check for efficiency.
176  *
177  * However, the double copy is only necessary on those architectures
178  * where unaligned memory accesses are inefficient.  For those architectures
179  * where unaligned memory accesses incur little penalty, we can reintegrate
180  * the 5701 in the normal rx path.  Doing so saves a device structure
181  * dereference by hardcoding the double copy threshold in place.
182  */
183 #define TG3_RX_COPY_THRESHOLD           256
184 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
185         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
186 #else
187         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
188 #endif
189
190 #if (NET_IP_ALIGN != 0)
191 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
192 #else
193 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
194 #endif
195
196 /* minimum number of free TX descriptors required to wake up TX process */
197 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
198 #define TG3_TX_BD_DMA_MAX_2K            2048
199 #define TG3_TX_BD_DMA_MAX_4K            4096
200
201 #define TG3_RAW_IP_ALIGN 2
202
203 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
204 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208
209 #define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
210 #define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
211 #define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
212 #define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"
213
214 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
215 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
216 MODULE_LICENSE("GPL");
217 /*(DEBLOBBED)*/
218
219 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
220 module_param(tg3_debug, int, 0);
221 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
222
223 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
224 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
225
/* PCI IDs this driver binds to.  Entries without explicit .driver_data
 * default to 0; TG3_DRV_DATA_FLAG_* mark 10/100-only parts (and the
 * 5705-class subset of those).  PCI_DEVICE_SUB entries match specific
 * subsystem vendor/device combinations and must precede the generic
 * entry for the same device ID.
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* sentinel */
};
344
345 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
346
/* String names reported for `ethtool -S`.  One entry per counter;
 * TG3_NUM_STATS is derived from this array's length.
 * NOTE(review): the ordering presumably mirrors the order in which the
 * hardware statistics are copied out elsewhere in this file — verify
 * against the stats-gathering code before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
429
430 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
431 #define TG3_NVRAM_TEST          0
432 #define TG3_LINK_TEST           1
433 #define TG3_REGISTER_TEST       2
434 #define TG3_MEMORY_TEST         3
435 #define TG3_MAC_LOOPB_TEST      4
436 #define TG3_PHY_LOOPB_TEST      5
437 #define TG3_EXT_LOOPB_TEST      6
438 #define TG3_INTERRUPT_TEST      7
439
440
/* Names for the ethtool self-tests, indexed by the TG3_*_TEST constants
 * above (designated initializers keep index and name in sync).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
453
454 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
455
456
/* Plain posted MMIO write to a device register (no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
461
/* Plain MMIO read of a device register. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
466
/* MMIO write to the APE (management processor) register block. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
471
/* MMIO read from the APE (management processor) register block. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
476
/* Write a device register indirectly through PCI config space:
 * program the target offset into TG3PCI_REG_BASE_ADDR, then write the
 * value via TG3PCI_REG_DATA.  indirect_lock serializes the two-step
 * sequence against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
486
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
492
/* Read a device register indirectly through PCI config space; the
 * address-then-data pair is serialized by indirect_lock (see
 * tg3_write_indirect_reg32 for the write counterpart).
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
504
/* Write a mailbox register when direct MMIO cannot be used.
 * Two mailboxes have dedicated PCI config-space aliases and are written
 * directly; everything else goes through the locked address/data pair,
 * with 0x5600 added to translate the mailbox offset into the indirect
 * register address space.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has its own config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* So does the standard RX ring producer index. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
534
/* Read a mailbox register via the locked PCI config-space indirection;
 * 0x5600 maps the mailbox offset into the indirect register space
 * (matches tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
546
547 /* usec_wait specifies the wait time in usec when writing to certain registers
548  * where it is unsafe to read back the register without some delay.
549  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
550  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
551  */
/* Register write with an optional settle delay (usec_wait) for registers
 * where an immediate read-back is unsafe.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 *
 * On chips with posted-write hardware bugs (PCIX target bug, ICH
 * workaround) the non-posted indirect path is used instead; otherwise a
 * direct write is flushed by a read, with the delay applied both before
 * and after the flush so the full wait time is guaranteed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
570
571 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
572 {
573         tp->write32_mbox(tp, off, val);
574         if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
575             (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
576              !tg3_flag(tp, ICH_WORKAROUND)))
577                 tp->read32_mbox(tp, off);
578 }
579
/* Write a TX mailbox register.  On chips with the TXD mailbox hardware
 * bug the value is written twice; a read-back flush is added when the
 * chipset may reorder posted writes or always requires flushing.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
590
/* 5906 variant: mailboxes live at GRCMBOX_BASE within the register BAR. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
595
/* 5906 variant: mailbox writes go through the GRCMBOX_BASE window. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
600
601 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
602 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
603 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
604 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
605 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
606
607 #define tw32(reg, val)                  tp->write32(tp, reg, val)
608 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
609 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
610 #define tr32(reg)                       tp->read32(tp, reg)
611
/* Write a word into NIC on-board SRAM through the memory window.
 * On the 5906 the statistics-block region is skipped entirely.  The
 * window base/data pair is accessed either via PCI config space
 * (SRAM_USE_CONFIG) or via flushed MMIO, under indirect_lock; the
 * window base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: writes to the stats block SRAM range are not allowed. */
	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
636
/* Read a word from NIC on-board SRAM through the memory window
 * (counterpart to tg3_write_mem).  On the 5906 the statistics-block
 * range is not readable and *val is reported as 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
663
664 static void tg3_ape_lock_init(struct tg3 *tp)
665 {
666         int i;
667         u32 regbase, bit;
668
669         if (tg3_asic_rev(tp) == ASIC_REV_5761)
670                 regbase = TG3_APE_LOCK_GRANT;
671         else
672                 regbase = TG3_APE_PER_LOCK_GRANT;
673
674         /* Make sure the driver hasn't any stale locks. */
675         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
676                 switch (i) {
677                 case TG3_APE_LOCK_PHY0:
678                 case TG3_APE_LOCK_PHY1:
679                 case TG3_APE_LOCK_PHY2:
680                 case TG3_APE_LOCK_PHY3:
681                         bit = APE_LOCK_GRANT_DRIVER;
682                         break;
683                 default:
684                         if (!tp->pci_fn)
685                                 bit = APE_LOCK_GRANT_DRIVER;
686                         else
687                                 bit = 1 << tp->pci_fn;
688                 }
689                 tg3_ape_write32(tp, regbase + 4 * i, bit);
690         }
691
692 }
693
/* Acquire an APE hardware lock shared with the management firmware.
 * Writes the driver's request bit, then polls the grant register for up
 * to ~1 ms (100 x 10 us).  On failure (or if the PCI channel went
 * offline) the request is revoked and -EBUSY is returned.  Returns 0 if
 * APE is not enabled or for the GPIO lock on 5761, which needs none.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Per-function request bit; function 0 uses the driver bit. */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant registers. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
756
/* Release an APE hardware mutex previously taken with tg3_ape_lock().
 * Writing our ownership bit to the grant register clears the lock.
 *
 * @tp:      device private data
 * @locknum: TG3_APE_LOCK_* index of the mutex to release
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* Mirrors the no-op GPIO case in tg3_ape_lock(). */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must match the bit used when the lock was acquired. */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	/* 5761 uses the legacy grant register block. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
793
/* Take the APE MEM lock once no event is pending.
 *
 * Polls the event-status register, releasing and re-taking the MEM lock
 * between polls, until APE_EVENT_STATUS_EVENT_PENDING clears or
 * @timeout_us expires.
 *
 * Returns 0 with TG3_APE_LOCK_MEM held on success, -EBUSY if the lock
 * could not be taken or the timeout elapsed.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Drop the lock so the APE can make progress. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
814
815 #ifdef CONFIG_TIGON3_HWMON
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, in chunks bounded by the shared message buffer size.  Each
 * chunk is requested by posting a SCRTCHPD_READ driver event and
 * waiting for the APE to copy the data into the message buffer.
 *
 * Returns 0 on success (or when the APE has no NCSI), -ENODEV if the
 * APE segment signature is wrong, -EAGAIN if the firmware is not ready
 * or a request times out.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Verify the APE shared segment is present and the FW is up. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer and its capacity. */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* Tell the APE what to copy: source offset, then length. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock and ring the APE doorbell. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy this chunk out of the message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895 #endif
896
/* Post a driver event to the APE firmware.
 *
 * Returns 0 on success, -EAGAIN if the APE segment or firmware is not
 * ready, or the error from tg3_ape_event_lock() if a previous event is
 * still outstanding.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	/* Sanity-check that the APE shared segment is live. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock taken by tg3_ape_event_lock() and ring the
	 * APE doorbell.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
923
/* Inform the APE firmware of a driver state transition.  Only
 * RESET_KIND_INIT and RESET_KIND_SHUTDOWN are handled; other kinds are
 * ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature, length, driver id
		 * and behavior flags, and bump the heartbeat and init
		 * counters so the APE sees a live host driver.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* Advertise WoL state when wakeup is enabled, otherwise
		 * report a plain driver unload.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
971
972 static void tg3_send_ape_heartbeat(struct tg3 *tp,
973                                    unsigned long interval)
974 {
975         /* Check if hb interval has exceeded */
976         if (!tg3_flag(tp, ENABLE_APE) ||
977             time_before(jiffies, tp->ape_hb_jiffies + interval))
978                 return;
979
980         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
981         tp->ape_hb_jiffies = jiffies;
982 }
983
984 static void tg3_disable_ints(struct tg3 *tp)
985 {
986         int i;
987
988         tw32(TG3PCI_MISC_HOST_CTRL,
989              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
990         for (i = 0; i < tp->irq_max; i++)
991                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
992 }
993
/* Unmask chip interrupts and re-arm every active interrupt vector.
 * Counterpart of tg3_disable_ints().
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Make the irq_sync clear visible before interrupts are unmasked. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): in 1-shot MSI mode the mailbox is written
		 * a second time — appears required to re-arm; confirm
		 * against chip documentation.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' bits from the cached coal_now. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1024
1025 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1026 {
1027         struct tg3 *tp = tnapi->tp;
1028         struct tg3_hw_status *sblk = tnapi->hw_status;
1029         unsigned int work_exists = 0;
1030
1031         /* check for phy events */
1032         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1033                 if (sblk->status & SD_STATUS_LINK_CHG)
1034                         work_exists = 1;
1035         }
1036
1037         /* check for TX work to do */
1038         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1039                 work_exists = 1;
1040
1041         /* check for RX work to do */
1042         if (tnapi->rx_rcb_prod_idx &&
1043             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1044                 work_exists = 1;
1045
1046         return work_exists;
1047 }
1048
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Writing the last processed tag re-arms this vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1068
/* Reprogram the core clock control register, stepping through the ALT
 * clock where the hardware requires it.  A no-op on CPMU-equipped and
 * 5780-class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low-order field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition through the ALT clock before the final
		 * setting takes effect.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1101
1102 #define PHY_BUSY_LOOPS  5000
1103
/* Read PHY register @reg at MII address @phy_addr through the MAC's MI
 * (MDIO) interface.  Hardware auto-polling is suspended and the APE
 * PHY lock is held for the duration of the transaction.
 *
 * Returns 0 with the register value in *@val, or -EBUSY if the MI
 * interface stayed busy past PHY_BUSY_LOOPS polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with our manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, 10 usec per iteration. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Settle briefly, then re-read the result. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1157
1158 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1159 {
1160         return __tg3_readphy(tp, tp->phy_addr, reg, val);
1161 }
1162
/* Write @val to PHY register @reg at MII address @phy_addr through the
 * MAC's MI (MDIO) interface.  Hardware auto-polling is suspended and
 * the APE PHY lock is held for the duration of the transaction.
 *
 * Returns 0 on success, -EBUSY on MI timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Writes to these registers are skipped (treated as success)
	 * on FET-style PHYs.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with our manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion, 10 usec per iteration. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1216
1217 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1218 {
1219         return __tg3_writephy(tp, tp->phy_addr, reg, val);
1220 }
1221
1222 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 {
1224         int err;
1225
1226         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227         if (err)
1228                 goto done;
1229
1230         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231         if (err)
1232                 goto done;
1233
1234         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1235                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236         if (err)
1237                 goto done;
1238
1239         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1240
1241 done:
1242         return err;
1243 }
1244
1245 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 {
1247         int err;
1248
1249         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250         if (err)
1251                 goto done;
1252
1253         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254         if (err)
1255                 goto done;
1256
1257         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259         if (err)
1260                 goto done;
1261
1262         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1263
1264 done:
1265         return err;
1266 }
1267
1268 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 {
1270         int err;
1271
1272         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1273         if (!err)
1274                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1275
1276         return err;
1277 }
1278
1279 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 {
1281         int err;
1282
1283         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284         if (!err)
1285                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1286
1287         return err;
1288 }
1289
1290 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 {
1292         int err;
1293
1294         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1295                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1296                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1297         if (!err)
1298                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1299
1300         return err;
1301 }
1302
1303 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1304 {
1305         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1306                 set |= MII_TG3_AUXCTL_MISC_WREN;
1307
1308         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1309 }
1310
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1312 {
1313         u32 val;
1314         int err;
1315
1316         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1317
1318         if (err)
1319                 return err;
1320
1321         if (enable)
1322                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323         else
1324                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1325
1326         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1327                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1328
1329         return err;
1330 }
1331
1332 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1333 {
1334         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1335                             reg | val | MII_TG3_MISC_SHDW_WREN);
1336 }
1337
/* Software-reset the PHY via BMCR and wait for the self-clearing reset
 * bit to drop.  Returns 0 on success, -EBUSY on an MDIO failure or if
 * the reset bit never clears.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 here only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1368
1369 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1370 {
1371         struct tg3 *tp = bp->priv;
1372         u32 val;
1373
1374         spin_lock_bh(&tp->lock);
1375
1376         if (__tg3_readphy(tp, mii_id, reg, &val))
1377                 val = -EIO;
1378
1379         spin_unlock_bh(&tp->lock);
1380
1381         return val;
1382 }
1383
1384 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1385 {
1386         struct tg3 *tp = bp->priv;
1387         u32 ret = 0;
1388
1389         spin_lock_bh(&tp->lock);
1390
1391         if (__tg3_writephy(tp, mii_id, reg, val))
1392                 ret = -EIO;
1393
1394         spin_unlock_bh(&tp->lock);
1395
1396         return ret;
1397 }
1398
/* Program the 5785 MAC's PHY interface glue — LED modes and, for RGMII
 * PHYs, clock timeouts and optional in-band status signaling — based on
 * the attached PHY model and the RGMII_* driver flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		/* Unknown PHY model: leave the hardware untouched. */
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes and clock timeouts only. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the mask/enable bits. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band settings into the RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1479
/* Disable MI auto-polling so software owns the MDIO bus, then reapply
 * the 5785 MAC/PHY glue configuration if the bus is already up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1490
/* Determine the device's PHY address and, when phylib is in use,
 * allocate and register the MDIO bus and apply per-PHY quirk flags.
 *
 * Returns 0 on success or a negative errno (allocation/registration
 * failure, no usable PHY, or a bad ROBO switch address).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ maps PHYs by PCI function; SerDes PHYs sit 7
		 * addresses above the corresponding copper PHYs.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		/* SSB cores behind a ROBO switch get their PHY address
		 * from the switch driver.
		 */
		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	/* Scan only our PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply PHY-model-specific interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593         if (tg3_flag(tp, MDIOBUS_INITED)) {
1594                 tg3_flag_clear(tp, MDIOBUS_INITED);
1595                 mdiobus_unregister(tp->mdio_bus);
1596                 mdiobus_free(tp->mdio_bus);
1597         }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603         u32 val;
1604
1605         val = tr32(GRC_RX_CPU_EVENT);
1606         val |= GRC_RX_CPU_DRIVER_EVENT;
1607         tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609         tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
/* tp->lock is held. */
/* Wait until the RX CPU has acknowledged (cleared) the previous driver
 * event, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from the last
 * event we generated.  Bails out early if the PCI channel is offline.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 usec steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1643
/* tp->lock is held. */
/* Pack the basic MII registers into data[0..3] for the firmware link
 * report.  Each word holds one register in the high 16 bits and its
 * companion in the low 16 bits: BMCR:BMSR, ADVERTISE:LPA,
 * CTRL1000:STAT1000, PHYADDR:0.  Failed reads leave zeroes.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* 1000BASE-T registers are skipped on MII-SerDes PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1678
/* tp->lock is held. */
/* Report a link change to the management (ASF) firmware via the NIC SRAM
 * command mailbox.  Only applies to 5780-class devices with ASF enabled;
 * everyone else returns immediately.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	/* Ensure the previous event has been consumed before overwriting
	 * the mailbox.
	 */
	tg3_wait_for_event_ack(tp);

	/* Command word, payload length (14 -- fixed by the firmware
	 * interface), then the four PHY status words.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	/* Kick the firmware to process the mailbox. */
	tg3_generate_fw_event(tp);
}
1700
/* tp->lock is held. */
/* Ask the ASF firmware to pause (FWCMD_NICDRV_PAUSE_FW), typically ahead
 * of a chip reset.  Skipped when ASF is off or the APE manages firmware.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1716
/* tp->lock is held. */
/* Before a chip reset: write the firmware magic into the firmware mailbox
 * and, for devices using the new ASF handshake, record the driver state
 * (START/UNLOAD/SUSPEND per @kind) in the driver-state mailbox so the
 * firmware knows why the reset is happening.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1745
/* tp->lock is held. */
/* After a chip reset: complete the new-handshake ASF protocol by writing
 * the matching *_DONE driver state for @kind.  Note there is deliberately
 * no RESET_KIND_SUSPEND case here, unlike tg3_write_sig_pre_reset().
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}
1766
/* tp->lock is held. */
/* Legacy (pre new-handshake) ASF signalling: write the driver state for
 * @kind whenever ASF is enabled, with no magic/DONE protocol around it.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1792
/* Wait for on-chip firmware to finish initializing after a reset.
 * Returns 0 on success or when no firmware is expected (SSB cores, or
 * a previously detected firmware-less part); -ENODEV only for the 5906
 * VCPU path when init-done never shows or the PCI channel dies.
 * For all other chips a timeout is NOT an error (some Sun onboard parts
 * ship without firmware) -- it is merely logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	/* Firmware signals completion by writing the bitwise complement of
	 * the magic value back into the mailbox.  Up to 1 second total
	 * (100000 * 10 usec).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1856
/* Log the current link state (speed/duplex/flow-control/EEE), forward it
 * to the management firmware via tg3_ump_link_report(), and cache the
 * carrier state in tp->link_up.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
1886
1887 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1888 {
1889         u32 flowctrl = 0;
1890
1891         if (adv & ADVERTISE_PAUSE_CAP) {
1892                 flowctrl |= FLOW_CTRL_RX;
1893                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1894                         flowctrl |= FLOW_CTRL_TX;
1895         } else if (adv & ADVERTISE_PAUSE_ASYM)
1896                 flowctrl |= FLOW_CTRL_TX;
1897
1898         return flowctrl;
1899 }
1900
1901 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1902 {
1903         u16 miireg;
1904
1905         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1906                 miireg = ADVERTISE_1000XPAUSE;
1907         else if (flow_ctrl & FLOW_CTRL_TX)
1908                 miireg = ADVERTISE_1000XPSE_ASYM;
1909         else if (flow_ctrl & FLOW_CTRL_RX)
1910                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1911         else
1912                 miireg = 0;
1913
1914         return miireg;
1915 }
1916
1917 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1918 {
1919         u32 flowctrl = 0;
1920
1921         if (adv & ADVERTISE_1000XPAUSE) {
1922                 flowctrl |= FLOW_CTRL_RX;
1923                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1924                         flowctrl |= FLOW_CTRL_TX;
1925         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1926                 flowctrl |= FLOW_CTRL_TX;
1927
1928         return flowctrl;
1929 }
1930
1931 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1932 {
1933         u8 cap = 0;
1934
1935         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1936                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1937         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1938                 if (lcladv & ADVERTISE_1000XPAUSE)
1939                         cap = FLOW_CTRL_RX;
1940                 if (rmtadv & ADVERTISE_1000XPAUSE)
1941                         cap = FLOW_CTRL_TX;
1942         }
1943
1944         return cap;
1945 }
1946
/* Resolve and program the active flow-control configuration.
 * With autoneg + PAUSE_AUTONEG the result comes from the local/remote
 * advertisements (@lcladv/@rmtadv); otherwise the user-configured
 * tp->link_config.flowctrl is applied as-is.  The MAC RX/TX mode
 * registers are only rewritten when the enable bit actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib the autoneg setting lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause bits; copper uses the
		 * generic MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1985
/* phylib adjust_link callback: sync MAC mode, MI status, TX length timings
 * and flow control with the PHY's current link state, then report any
 * change.  Runs under tp->lock; tg3_link_report() is deliberately called
 * after the lock is dropped.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode minus the port-mode and
	 * half-duplex bits, which are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the PHY speed; the
		 * 5785 is special-cased to use MII except at gigabit.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather pause advertisements for
			 * flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* 5785 needs the MI status block told about 10 Mbps mode. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when something user-visible changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
2069
/* Connect the MAC to its PHY through phylib and constrain the PHY's
 * advertised features to what the MAC supports.  Idempotent: returns 0
 * immediately if already connected.  Returns a phy_connect() error or
 * -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		/* 10/100-only hardware: fall through to the MII limits. */
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		/* Unsupported interface mode: undo the connect. */
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
2115
/* (Re)start the attached PHY.  If the device is coming out of low-power
 * mode, first restore the link settings that were saved in
 * tp->link_config before suspending.  No-op when no PHY is connected.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		/* link_config.advertising is a legacy u32 bitmap; convert
		 * it into the phylib link-mode representation.
		 */
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2138
2139 static void tg3_phy_stop(struct tg3 *tp)
2140 {
2141         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142                 return;
2143
2144         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2145 }
2146
2147 static void tg3_phy_fini(struct tg3 *tp)
2148 {
2149         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2150                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2151                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2152         }
2153 }
2154
/* Enable external loopback via the PHY AUXCTL shadow register.
 * FET-style PHYs don't have this control (return 0).  The 5401 can't do
 * a read-modify-write on AUXCTL, so a full known-good value (0x4c20 plus
 * the loopback bit) is written instead.  Returns a phy access error code.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2184
/* Toggle Auto Power-Down on FET-style PHYs via the shadow register set:
 * enable shadow access through MII_TG3_FET_TEST, flip the APD bit in
 * AUXSTAT2, then restore the original TEST register value.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Leave shadow-register mode. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
2204
/* Toggle the PHY's Auto Power-Down feature.  Not applicable on pre-5705
 * devices, nor on 5717-plus MII serdes.  FET PHYs use their own shadow
 * register path; others program the MISC shadow SCR5 and APD selectors.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled must leave DLLAPD clear. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	/* 84ms wake time, plus the enable bit when turning APD on. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2235
/* Toggle automatic MDI/MDI-X crossover detection.  Only meaningful for
 * copper PHYs on 5705-plus parts (serdes excluded).  FET PHYs flip the
 * bit in the MISCCTRL shadow register; others use the AUXCTL MISC shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Enable shadow-register access for the update,
			 * then restore the TEST register afterwards.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2276
/* Enable the PHY's Ethernet@WireSpeed feature (downshift to a lower
 * speed on marginal cabling) unless the hardware flags say it must stay
 * off.  A failed AUXCTL read silently skips the write.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2290
/* Program per-chip analog tuning values from the OTP word (tp->phy_otp)
 * into the PHY DSP registers.  Each write unpacks one or two bit-fields
 * from the OTP word with the TG3_OTP_*_MASK/SHIFT pairs.  The whole
 * sequence is bracketed by AUXCTL SMDSP enable/disable; if the enable
 * fails nothing is written.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2327
/* Read the live EEE state out of the PHY (Clause 45 registers) and CPMU
 * into @eee, or into tp->eee when @eee is NULL.  Each field is pulled
 * independently; a failed register read aborts, leaving later fields
 * untouched.  Only runs on EEE-capable PHYs.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	/* Active only when the resolution status reports an EEE link at
	 * 1000T or 100TX.
	 */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	/* An empty advertisement means EEE is effectively disabled. */
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2367
/* Adjust EEE state after a link change.  When the link is an autoneg'd
 * full-duplex 100/1000 link, program the CPMU LPI exit timer and pull
 * the negotiated EEE config; tp->setlpicnt is set to 2 when EEE became
 * active (counted down elsewhere).  Otherwise EEE LPI is disabled and,
 * with link up, the DSP TAP26 register is cleared.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		/* Turn LPI off in the CPMU when EEE is not active. */
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2407
/* Enable EEE LPI in the CPMU.  On 5717/5719/57765-class chips running at
 * gigabit, first set the ALNOKO/RMRXSTO bits in DSP TAP26 (via the
 * AUXCTL SMDSP window) -- presumably a hardware workaround for those
 * ASICs; skipped if the SMDSP enable fails.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2426
/* Poll DSP_CONTROL until its busy bit (0x1000) clears, up to 100 reads.
 * Returns 0 on completion, -EBUSY on timeout.  Note the post-decrement:
 * when the loop exhausts, limit ends at -1, so "limit < 0" distinguishes
 * timeout from a break with limit == 0.
 */
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2444
/* Write a fixed test pattern into each of the PHY's four DSP channels and
 * read it back to verify the DSP macro path.  The DSP_CONTROL values
 * (0x0002/0x0202/0x0082/0x0802) and address layout (chan * 0x2000 | 0x0200)
 * are Broadcom magic for write/commit/read-setup/read -- do not reorder.
 * Sets *resetp when a macro-done wait fails so the caller retries after a
 * PHY reset.  Returns 0 on match, -EBUSY on any failure or mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel and put the macro in write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back in low/high pairs; compare against the
		 * pattern after masking to the significant bits.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the magic recovery
				 * sequence, then report busy (caller may
				 * retry without forcing a PHY reset).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2510
/* Clear the DSP TAP coefficients previously loaded by
 * tg3_phy_write_and_check_testpat(), returning all four transmitter
 * channels to an all-zero pattern.
 *
 * Returns 0 on success, or -EBUSY if the DSP handshake
 * (tg3_wait_macro_done) times out on any channel.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	/* Four channels, spaced 0x2000 apart in the DSP address space. */
	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		/* Zero all six TAP registers for this channel. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2530
/* PHY reset workaround for 5703/5704/5705: repeatedly load a DSP test
 * pattern and verify the read-back (up to 10 attempts), resetting the
 * PHY between attempts when the pattern does not stick.  On success the
 * channel patterns are cleared again and the saved MII_CTRL1000 and
 * MII_TG3_EXT_CTRL register state is restored.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): if every loop iteration bails out via "continue" before
 * MII_CTRL1000 is successfully read, phy9_orig is used uninitialized
 * after the loop — presumably the readphy calls cannot fail on every
 * retry in practice, but worth confirming.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns loaded above. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	/* Restore the saved master/slave configuration. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	if (err)
		return err;

	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return 0;
}
2598
/* Mark the link as down: clear the netdev carrier state and the
 * driver's cached link_up flag.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2604
2605 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2606 {
2607         if (tg3_flag(tp, ENABLE_ASF))
2608                 netdev_warn(tp->dev,
2609                             "Management side-band traffic will be interrupted during phy settings change\n");
2610 }
2611
/* Reset the tigon3 PHY and reapply all chip-specific workarounds that a
 * reset wipes out (CPMU fixups, DSP tweaks, jumbo-frame settings,
 * MDI-X and wirespeed configuration).  Reports a lost link first if one
 * was up.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Bring the embedded PHY out of IDDQ power-down first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset will take down any established link; report it now. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB-RX-only mode around the reset. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock selection if it was forced. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP workaround for the ADC erratum. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* Written twice deliberately — presumably required by the
		 * 5704 A0 erratum; confirm against Broadcom errata docs.
		 */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		/* DSP workaround for the bit-error-rate erratum. */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		/* DSP workaround for the jitter erratum, with an optional
		 * trim adjustment on affected parts.
		 */
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2755
/* Power-source coordination messages shared between the (up to four)
 * PCI functions of a device.  Each function owns a 4-bit nibble in a
 * common status word — hence the << 0/4/8/12 replication in the
 * ALL_* masks below — published via tg3_set_function_status().
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2771
2772 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2773 {
2774         u32 status, shift;
2775
2776         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2777             tg3_asic_rev(tp) == ASIC_REV_5719)
2778                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2779         else
2780                 status = tr32(TG3_CPMU_DRV_STATUS);
2781
2782         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2783         status &= ~(TG3_GPIO_MSG_MASK << shift);
2784         status |= (newstat << shift);
2785
2786         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2787             tg3_asic_rev(tp) == ASIC_REV_5719)
2788                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2789         else
2790                 tw32(TG3_CPMU_DRV_STATUS, status);
2791
2792         return status >> TG3_APE_GPIO_MSG_SHIFT;
2793 }
2794
2795 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2796 {
2797         if (!tg3_flag(tp, IS_NIC))
2798                 return 0;
2799
2800         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2802             tg3_asic_rev(tp) == ASIC_REV_5720) {
2803                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2804                         return -EIO;
2805
2806                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2807
2808                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2809                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2810
2811                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2812         } else {
2813                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2815         }
2816
2817         return 0;
2818 }
2819
2820 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2821 {
2822         u32 grc_local_ctrl;
2823
2824         if (!tg3_flag(tp, IS_NIC) ||
2825             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2826             tg3_asic_rev(tp) == ASIC_REV_5701)
2827                 return;
2828
2829         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2830
2831         tw32_wait_f(GRC_LOCAL_CTRL,
2832                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2833                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2834
2835         tw32_wait_f(GRC_LOCAL_CTRL,
2836                     grc_local_ctrl,
2837                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2838
2839         tw32_wait_f(GRC_LOCAL_CTRL,
2840                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2841                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 }
2843
/* Switch the NIC's power source to auxiliary power (Vaux) by stepping
 * the GRC local-control GPIOs through a board-specific sequence.  Each
 * write waits TG3_GRC_LCLCTL_PWRSW_DELAY to let the power switch
 * settle.  No-op for non-NIC configurations.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Single write suffices on 5700/5701. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO2 back low where it is usable. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2920
2921 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2922 {
2923         u32 msg = 0;
2924
2925         /* Serialize power state transitions */
2926         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2927                 return;
2928
2929         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2930                 msg = TG3_GPIO_MSG_NEED_VAUX;
2931
2932         msg = tg3_set_function_status(tp, msg);
2933
2934         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2935                 goto done;
2936
2937         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2938                 tg3_pwrsrc_switch_to_vaux(tp);
2939         else
2940                 tg3_pwrsrc_die_with_vmain(tp);
2941
2942 done:
2943         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2944 }
2945
/* Decide whether this NIC (and its peer function on two-port boards)
 * needs auxiliary power, and switch the power source accordingly.
 * include_wol controls whether Wake-on-LAN counts toward needing Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		/* 5717-class parts coordinate through the APE instead. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Leave the GPIOs alone while the peer is active. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2989
2990 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2991 {
2992         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2993                 return 1;
2994         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2995                 if (speed != SPEED_10)
2996                         return 1;
2997         } else if (speed == SPEED_10)
2998                 return 1;
2999
3000         return 0;
3001 }
3002
3003 static bool tg3_phy_power_bug(struct tg3 *tp)
3004 {
3005         switch (tg3_asic_rev(tp)) {
3006         case ASIC_REV_5700:
3007         case ASIC_REV_5704:
3008                 return true;
3009         case ASIC_REV_5780:
3010                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3011                         return true;
3012                 return false;
3013         case ASIC_REV_5717:
3014                 if (!tp->pci_fn)
3015                         return true;
3016                 return false;
3017         case ASIC_REV_5719:
3018         case ASIC_REV_5720:
3019                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3020                     !tp->pci_fn)
3021                         return true;
3022                 return false;
3023         }
3024
3025         return false;
3026 }
3027
3028 static bool tg3_phy_led_bug(struct tg3 *tp)
3029 {
3030         switch (tg3_asic_rev(tp)) {
3031         case ASIC_REV_5719:
3032         case ASIC_REV_5720:
3033                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3034                     !tp->pci_fn)
3035                         return true;
3036                 return false;
3037         }
3038
3039         return false;
3040 }
3041
/* Power down the PHY ahead of a low-power transition, taking the
 * chip-specific path required by each PHY type (serdes, 5906 embedded
 * PHY, FET-style PHY, or a standard copper PHY).  Skipped entirely when
 * the link must stay up for management traffic.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			/* Hold the serdes in reset with HW autoneg on. */
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Reset the PHY, then put it into IDDQ power-down. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Clear advertisement and restart autoneg. */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow access to set standby power-down. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Drop the MAC clock to 12.5MHz before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3115
3116 /* tp->lock is held. */
3117 static int tg3_nvram_lock(struct tg3 *tp)
3118 {
3119         if (tg3_flag(tp, NVRAM)) {
3120                 int i;
3121
3122                 if (tp->nvram_lock_cnt == 0) {
3123                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3124                         for (i = 0; i < 8000; i++) {
3125                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3126                                         break;
3127                                 udelay(20);
3128                         }
3129                         if (i == 8000) {
3130                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3131                                 return -ENODEV;
3132                         }
3133                 }
3134                 tp->nvram_lock_cnt++;
3135         }
3136         return 0;
3137 }
3138
3139 /* tp->lock is held. */
3140 static void tg3_nvram_unlock(struct tg3 *tp)
3141 {
3142         if (tg3_flag(tp, NVRAM)) {
3143                 if (tp->nvram_lock_cnt > 0)
3144                         tp->nvram_lock_cnt--;
3145                 if (tp->nvram_lock_cnt == 0)
3146                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3147         }
3148 }
3149
3150 /* tp->lock is held. */
3151 static void tg3_enable_nvram_access(struct tg3 *tp)
3152 {
3153         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154                 u32 nvaccess = tr32(NVRAM_ACCESS);
3155
3156                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3157         }
3158 }
3159
3160 /* tp->lock is held. */
3161 static void tg3_disable_nvram_access(struct tg3 *tp)
3162 {
3163         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3164                 u32 nvaccess = tr32(NVRAM_ACCESS);
3165
3166                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3167         }
3168 }
3169
/* Read one 32-bit word from the legacy SEEPROM interface (used when the
 * NVRAM flag is not set).  offset must be word-aligned and within
 * EEPROM_ADDR_ADDR_MASK.  Polls completion for up to ~1s.
 *
 * Returns 0 with *val filled on success, -EINVAL for a bad offset,
 * -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, clear address/devid/read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Wait for the read to complete. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3209
/* Maximum number of 10-40us polls to wait for NVRAM_CMD_DONE. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue an NVRAM command and poll until the controller reports
 * completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			/* Small settle delay after completion. */
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3230
3231 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3232 {
3233         if (tg3_flag(tp, NVRAM) &&
3234             tg3_flag(tp, NVRAM_BUFFERED) &&
3235             tg3_flag(tp, FLASH) &&
3236             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3237             (tp->nvram_jedecnum == JEDEC_ATMEL))
3238
3239                 addr = ((addr / tp->nvram_pagesize) <<
3240                         ATMEL_AT45DB0X1B_PAGE_POS) +
3241                        (addr % tp->nvram_pagesize);
3242
3243         return addr;
3244 }
3245
3246 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3247 {
3248         if (tg3_flag(tp, NVRAM) &&
3249             tg3_flag(tp, NVRAM_BUFFERED) &&
3250             tg3_flag(tp, FLASH) &&
3251             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252             (tp->nvram_jedecnum == JEDEC_ATMEL))
3253
3254                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3255                         tp->nvram_pagesize) +
3256                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3257
3258         return addr;
3259 }
3260
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Reads one 32-bit word at byte offset 'offset'.  Falls back to the
 * legacy SEEPROM path when the NVRAM interface is absent.  Returns 0
 * on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical address layout. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3298
3299 /* Ensures NVRAM data is in bytestream format. */
3300 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3301 {
3302         u32 v;
3303         int res = tg3_nvram_read(tp, offset, &v);
3304         if (!res)
3305                 *val = cpu_to_be32(v);
3306         return res;
3307 }
3308
/* Write 'len' bytes from 'buf' to the legacy SEEPROM one 32-bit word
 * at a time, polling each word's completion for up to ~1s.
 * offset/len are expected to be dword-aligned by the caller.
 *
 * Returns 0 on success, -EBUSY if a word write times out (remaining
 * words are not written).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale completion status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Wait for this word's write to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3357
3358 /* offset and length are dword aligned */
3359 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3360                 u8 *buf)
3361 {
3362         int ret = 0;
3363         u32 pagesize = tp->nvram_pagesize;
3364         u32 pagemask = pagesize - 1;
3365         u32 nvram_cmd;
3366         u8 *tmp;
3367
3368         tmp = kmalloc(pagesize, GFP_KERNEL);
3369         if (tmp == NULL)
3370                 return -ENOMEM;
3371
3372         while (len) {
3373                 int j;
3374                 u32 phy_addr, page_off, size;
3375
3376                 phy_addr = offset & ~pagemask;
3377
3378                 for (j = 0; j < pagesize; j += 4) {
3379                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3380                                                   (__be32 *) (tmp + j));
3381                         if (ret)
3382                                 break;
3383                 }
3384                 if (ret)
3385                         break;
3386
3387                 page_off = offset & pagemask;
3388                 size = pagesize;
3389                 if (len < size)
3390                         size = len;
3391
3392                 len -= size;
3393
3394                 memcpy(tmp + page_off, buf, size);
3395
3396                 offset = offset + (pagesize - page_off);
3397
3398                 tg3_enable_nvram_access(tp);
3399
3400                 /*
3401                  * Before we can erase the flash page, we need
3402                  * to issue a special "write enable" command.
3403                  */
3404                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3405
3406                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3407                         break;
3408
3409                 /* Erase the target page */
3410                 tw32(NVRAM_ADDR, phy_addr);
3411
3412                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3413                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3414
3415                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3416                         break;
3417
3418                 /* Issue another write enable to start the write. */
3419                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3420
3421                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422                         break;
3423
3424                 for (j = 0; j < pagesize; j += 4) {
3425                         __be32 data;
3426
3427                         data = *((__be32 *) (tmp + j));
3428
3429                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3430
3431                         tw32(NVRAM_ADDR, phy_addr + j);
3432
3433                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3434                                 NVRAM_CMD_WR;
3435
3436                         if (j == 0)
3437                                 nvram_cmd |= NVRAM_CMD_FIRST;
3438                         else if (j == (pagesize - 4))
3439                                 nvram_cmd |= NVRAM_CMD_LAST;
3440
3441                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3442                         if (ret)
3443                                 break;
3444                 }
3445                 if (ret)
3446                         break;
3447         }
3448
3449         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450         tg3_nvram_exec_cmd(tp, nvram_cmd);
3451
3452         kfree(tmp);
3453
3454         return ret;
3455 }
3456
/* offset and length are dword aligned.
 *
 * Write @len bytes from @buf to buffered flash or EEPROM, one dword per
 * command.  FIRST/LAST command flags mark page boundaries so the
 * on-chip buffer is flushed at the right points.  Returns 0 on success,
 * negative errno on failure.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of a page or of the transfer;
		 * LAST at the end of a page or of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Flash on 57765+ parts auto-increments the address; only
		 * reload it when starting a new page.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts need an explicit write-enable before the
		 * first word of each page.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3511
3512 /* offset and length are dword aligned */
3513 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3514 {
3515         int ret;
3516
3517         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3518                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3519                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3520                 udelay(40);
3521         }
3522
3523         if (!tg3_flag(tp, NVRAM)) {
3524                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3525         } else {
3526                 u32 grc_mode;
3527
3528                 ret = tg3_nvram_lock(tp);
3529                 if (ret)
3530                         return ret;
3531
3532                 tg3_enable_nvram_access(tp);
3533                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3534                         tw32(NVRAM_WRITE1, 0x406);
3535
3536                 grc_mode = tr32(GRC_MODE);
3537                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3538
3539                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3540                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3541                                 buf);
3542                 } else {
3543                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3544                                 buf);
3545                 }
3546
3547                 grc_mode = tr32(GRC_MODE);
3548                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3549
3550                 tg3_disable_nvram_access(tp);
3551                 tg3_nvram_unlock(tp);
3552         }
3553
3554         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3555                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3556                 udelay(40);
3557         }
3558
3559         return ret;
3560 }
3561
3562 #define RX_CPU_SCRATCH_BASE     0x30000
3563 #define RX_CPU_SCRATCH_SIZE     0x04000
3564 #define TX_CPU_SCRATCH_BASE     0x34000
3565 #define TX_CPU_SCRATCH_SIZE     0x04000
3566
3567 /* tp->lock is held. */
3568 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3569 {
3570         int i;
3571         const int iters = 10000;
3572
3573         for (i = 0; i < iters; i++) {
3574                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3575                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3576                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3577                         break;
3578                 if (pci_channel_offline(tp->pdev))
3579                         return -EBUSY;
3580         }
3581
3582         return (i == iters) ? -EBUSY : 0;
3583 }
3584
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	/* Re-assert the halt request once more regardless of the poll
	 * outcome and give the CPU a moment to settle.
	 */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3596
/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	/* Halt the TX CPU; 0 on success, -EBUSY on timeout. */
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3602
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear pending CPU state bits and drop the halt request so the
	 * embedded CPU resumes execution.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3609
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	/* Restart the RX CPU after a pause. */
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3615
/* tp->lock is held.
 *
 * Halt the embedded RX or TX CPU at @cpu_base and clear the firmware's
 * NVRAM arbitration request.  Returns 0 on success, -ENODEV if the CPU
 * does not acknowledge the halt.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ parts have no TX CPU; halting it is a driver bug. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		/* 5906 uses a virtual CPU halted through GRC instead. */
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3653
3654 static int tg3_fw_data_len(struct tg3 *tp,
3655                            const struct tg3_firmware_hdr *fw_hdr)
3656 {
3657         int fw_len;
3658
3659         /* Non fragmented firmware have one firmware header followed by a
3660          * contiguous chunk of data to be written. The length field in that
3661          * header is not the length of data to be written but the complete
3662          * length of the bss. The data length is determined based on
3663          * tp->fw->size minus headers.
3664          *
3665          * Fragmented firmware have a main header followed by multiple
3666          * fragments. Each fragment is identical to non fragmented firmware
3667          * with a firmware header followed by a contiguous chunk of data. In
3668          * the main header, the length field is unused and set to 0xffffffff.
3669          * In each fragment header the length is the entire size of that
3670          * fragment i.e. fragment data + header length. Data length is
3671          * therefore length field in the header minus TG3_FW_HDR_LEN.
3672          */
3673         if (tp->fw_len == 0xffffffff)
3674                 fw_len = be32_to_cpu(fw_hdr->len);
3675         else
3676                 fw_len = tp->fw->size;
3677
3678         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3679 }
3680
/* tp->lock is held.
 *
 * Download the firmware image described by @fw_hdr into the scratch
 * memory of the embedded CPU at @cpu_base.  Handles both the classic
 * contiguous blob layout and the fragmented 57766 layout (see
 * tg3_fw_data_len()).  Returns 0 on success, negative errno on failure.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* 5705+ parts have no TX CPU to load. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory write method this chip supports. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then keep the CPU halted while
		 * the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to its target offset; for
	 * non-fragmented firmware this loop runs exactly once.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3746
/* tp->lock is held.
 *
 * Set the program counter of the CPU at @cpu_base to @pc, retrying a
 * few times with an explicit halt in between.  Returns 0 once the CPU
 * reports the requested PC, -EBUSY otherwise.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	/* First attempt: clear pending state and write the PC. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		/* Didn't take: halt the CPU and retry the PC write. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3767
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both CPU scratch areas and
 * start only the RX CPU at the blob's base address.  Returns 0 on
 * success, negative errno on failure.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3809
/* Check that the RX CPU boot code is idle in its service loop and that
 * no other firmware patch is active.  Returns 0 when it is safe to
 * download a service patch, -EBUSY or -EEXIST otherwise.
 */
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	/* A non-zero low byte in the handshake word means another patch
	 * already owns the CPU.
	 */
	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3840
/* tp->lock is held.
 *
 * Best-effort download of the 57766 service patch; only applies when
 * the device has no NVRAM of its own and the boot code is idle.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only NVRAM-less 57766 parts take this patch. */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3881
/* tp->lock is held.
 *
 * Download TSO firmware and start it at the blob's base address.  The
 * 5705 uses the RX CPU with mbuf-pool SRAM as scratch; other parts use
 * the dedicated TX CPU scratch space.  Returns 0 on success or when no
 * firmware-based TSO is needed, negative errno on failure.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3931
3932 /* tp->lock is held. */
3933 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3934                                    int index)
3935 {
3936         u32 addr_high, addr_low;
3937
3938         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3939         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3940                     (mac_addr[4] <<  8) | mac_addr[5]);
3941
3942         if (index < 4) {
3943                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3944                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3945         } else {
3946                 index -= 4;
3947                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3948                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3949         }
3950 }
3951
/* tp->lock is held.
 *
 * Program the device MAC address into all hardware address slots and
 * seed the TX backoff engine.  Slot 1 is left untouched when
 * @skip_mac_1 is set.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	/* 5703/5704 also mirror the address into the 12 extended slots. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 4; i < 16; i++)
			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
	}

	/* Seed the TX backoff engine with the byte sum of the address. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3979
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.  Rewrites the cached MISC_HOST_CTRL config word, which
	 * may have been lost across a power-state transition.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3989
3990 static int tg3_power_up(struct tg3 *tp)
3991 {
3992         int err;
3993
3994         tg3_enable_register_access(tp);
3995
3996         err = pci_set_power_state(tp->pdev, PCI_D0);
3997         if (!err) {
3998                 /* Switch out of Vaux if it is a NIC */
3999                 tg3_pwrsrc_switch_to_vmain(tp);
4000         } else {
4001                 netdev_err(tp->dev, "Transition to D0 failed\n");
4002         }
4003
4004         return err;
4005 }
4006
4007 static int tg3_setup_phy(struct tg3 *, bool);
4008
4009 static int tg3_power_down_prepare(struct tg3 *tp)
4010 {
4011         u32 misc_host_ctrl;
4012         bool device_should_wake, do_low_power;
4013
4014         tg3_enable_register_access(tp);
4015
4016         /* Restore the CLKREQ setting. */
4017         if (tg3_flag(tp, CLKREQ_BUG))
4018                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4019                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4020
4021         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4022         tw32(TG3PCI_MISC_HOST_CTRL,
4023              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4024
4025         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4026                              tg3_flag(tp, WOL_ENABLE);
4027
4028         if (tg3_flag(tp, USE_PHYLIB)) {
4029                 do_low_power = false;
4030                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4031                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4032                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4033                         struct phy_device *phydev;
4034                         u32 phyid;
4035
4036                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4037
4038                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4039
4040                         tp->link_config.speed = phydev->speed;
4041                         tp->link_config.duplex = phydev->duplex;
4042                         tp->link_config.autoneg = phydev->autoneg;
4043                         ethtool_convert_link_mode_to_legacy_u32(
4044                                 &tp->link_config.advertising,
4045                                 phydev->advertising);
4046
4047                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4048                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4049                                          advertising);
4050                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4051                                          advertising);
4052                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4053                                          advertising);
4054
4055                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4056                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4057                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4058                                                          advertising);
4059                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4060                                                          advertising);
4061                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4062                                                          advertising);
4063                                 } else {
4064                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4065                                                          advertising);
4066                                 }
4067                         }
4068
4069                         linkmode_copy(phydev->advertising, advertising);
4070                         phy_start_aneg(phydev);
4071
4072                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073                         if (phyid != PHY_ID_BCMAC131) {
4074                                 phyid &= PHY_BCM_OUI_MASK;
4075                                 if (phyid == PHY_BCM_OUI_1 ||
4076                                     phyid == PHY_BCM_OUI_2 ||
4077                                     phyid == PHY_BCM_OUI_3)
4078                                         do_low_power = true;
4079                         }
4080                 }
4081         } else {
4082                 do_low_power = true;
4083
4084                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086
4087                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088                         tg3_setup_phy(tp, false);
4089         }
4090
4091         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092                 u32 val;
4093
4094                 val = tr32(GRC_VCPU_EXT_CTRL);
4095                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4097                 int i;
4098                 u32 val;
4099
4100                 for (i = 0; i < 200; i++) {
4101                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103                                 break;
4104                         msleep(1);
4105                 }
4106         }
4107         if (tg3_flag(tp, WOL_CAP))
4108                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109                                                      WOL_DRV_STATE_SHUTDOWN |
4110                                                      WOL_DRV_WOL |
4111                                                      WOL_SET_MAGIC_PKT);
4112
4113         if (device_should_wake) {
4114                 u32 mac_mode;
4115
4116                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117                         if (do_low_power &&
4118                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119                                 tg3_phy_auxctl_write(tp,
4120                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4122                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124                                 udelay(40);
4125                         }
4126
4127                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4129                         else if (tp->phy_flags &
4130                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131                                 if (tp->link_config.active_speed == SPEED_1000)
4132                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4133                                 else
4134                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4135                         } else
4136                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4137
4138                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141                                              SPEED_100 : SPEED_10;
4142                                 if (tg3_5700_link_polarity(tp, speed))
4143                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4144                                 else
4145                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4146                         }
4147                 } else {
4148                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4149                 }
4150
4151                 if (!tg3_flag(tp, 5750_PLUS))
4152                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4153
4154                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158
4159                 if (tg3_flag(tp, ENABLE_APE))
4160                         mac_mode |= MAC_MODE_APE_TX_EN |
4161                                     MAC_MODE_APE_RX_EN |
4162                                     MAC_MODE_TDE_ENABLE;
4163
4164                 tw32_f(MAC_MODE, mac_mode);
4165                 udelay(100);
4166
4167                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168                 udelay(10);
4169         }
4170
4171         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174                 u32 base_val;
4175
4176                 base_val = tp->pci_clock_ctrl;
4177                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178                              CLOCK_CTRL_TXCLK_DISABLE);
4179
4180                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182         } else if (tg3_flag(tp, 5780_CLASS) ||
4183                    tg3_flag(tp, CPMU_PRESENT) ||
4184                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4185                 /* do nothing */
4186         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187                 u32 newbits1, newbits2;
4188
4189                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4191                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192                                     CLOCK_CTRL_TXCLK_DISABLE |
4193                                     CLOCK_CTRL_ALTCLK);
4194                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195                 } else if (tg3_flag(tp, 5705_PLUS)) {
4196                         newbits1 = CLOCK_CTRL_625_CORE;
4197                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198                 } else {
4199                         newbits1 = CLOCK_CTRL_ALTCLK;
4200                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201                 }
4202
4203                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204                             40);
4205
4206                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207                             40);
4208
4209                 if (!tg3_flag(tp, 5705_PLUS)) {
4210                         u32 newbits3;
4211
4212                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4214                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215                                             CLOCK_CTRL_TXCLK_DISABLE |
4216                                             CLOCK_CTRL_44MHZ_CORE);
4217                         } else {
4218                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4219                         }
4220
4221                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222                                     tp->pci_clock_ctrl | newbits3, 40);
4223                 }
4224         }
4225
4226         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227                 tg3_power_down_phy(tp, do_low_power);
4228
4229         tg3_frob_aux_power(tp, true);
4230
4231         /* Workaround for unstable PLL clock */
4232         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235                 u32 val = tr32(0x7d00);
4236
4237                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238                 tw32(0x7d00, val);
4239                 if (!tg3_flag(tp, ENABLE_ASF)) {
4240                         int err;
4241
4242                         err = tg3_nvram_lock(tp);
4243                         tg3_halt_cpu(tp, RX_CPU_BASE);
4244                         if (!err)
4245                                 tg3_nvram_unlock(tp);
4246                 }
4247         }
4248
4249         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250
4251         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4252
4253         return 0;
4254 }
4255
/* Final power-down step: arm PCI wakeup from D3 (only when Wake-on-LAN
 * is enabled) and then drop the device into the D3hot power state.
 * Wakeup must be armed before the state transition.
 */
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4261
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4263 {
4264         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265         case MII_TG3_AUX_STAT_10HALF:
4266                 *speed = SPEED_10;
4267                 *duplex = DUPLEX_HALF;
4268                 break;
4269
4270         case MII_TG3_AUX_STAT_10FULL:
4271                 *speed = SPEED_10;
4272                 *duplex = DUPLEX_FULL;
4273                 break;
4274
4275         case MII_TG3_AUX_STAT_100HALF:
4276                 *speed = SPEED_100;
4277                 *duplex = DUPLEX_HALF;
4278                 break;
4279
4280         case MII_TG3_AUX_STAT_100FULL:
4281                 *speed = SPEED_100;
4282                 *duplex = DUPLEX_FULL;
4283                 break;
4284
4285         case MII_TG3_AUX_STAT_1000HALF:
4286                 *speed = SPEED_1000;
4287                 *duplex = DUPLEX_HALF;
4288                 break;
4289
4290         case MII_TG3_AUX_STAT_1000FULL:
4291                 *speed = SPEED_1000;
4292                 *duplex = DUPLEX_FULL;
4293                 break;
4294
4295         default:
4296                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298                                  SPEED_10;
4299                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300                                   DUPLEX_HALF;
4301                         break;
4302                 }
4303                 *speed = SPEED_UNKNOWN;
4304                 *duplex = DUPLEX_UNKNOWN;
4305                 break;
4306         }
4307 }
4308
/* Program the PHY autonegotiation advertisement registers from the
 * requested @advertise (ethtool ADVERTISED_* mask) and @flowctrl
 * (FLOW_CTRL_* mask), including the EEE advertisement on EEE-capable
 * PHYs.  Returns 0 on success or the first error from a PHY access.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* 10/100 abilities plus pause bits go into MII_ADVERTISE */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		/* Gigabit abilities go into MII_CTRL1000 */
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0: additionally request master mode
		 * (presumably a chip workaround -- predates this review)
		 */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			/* EEE disabled: advertise nothing */
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;	/* write failed: treat as "nothing advertised" below */

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			fallthrough;
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always release the SMDSP; report its error only if no
		 * earlier error occurred.
		 */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4392
/* Begin copper-PHY link establishment.  With autonegotiation enabled
 * (or in low-power mode, where a reduced advertisement suitable for
 * WOL is negotiated) the advertisement is programmed and autoneg is
 * restarted; otherwise the configured speed/duplex is forced via BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low power: advertise only the slow speeds needed
			 * for wakeup (100 Mb / 1000 Mb only when flagged OK).
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			/* Normal operation: use the configured advertisement */
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* Forced speed/duplex path */
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop into loopback and poll (up to ~15 ms) for
			 * the link to go down before writing the new BMCR.
			 * BMSR is read twice per iteration since link status
			 * is a latched bit.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4489
/* Read the PHY's current configuration (BMCR and advertisement
 * registers) and translate it into tp->link_config, so the driver's
 * software state matches what firmware or a previous driver left
 * programmed.  Returns 0 on success, -EIO for an unsupported forced
 * configuration, or a PHY access error.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Autoneg is off: decode the forced speed/duplex */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* 10 Mb forced mode is not valid on SerDes */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			fallthrough;
		default:
			/* Both speed bits set, or 1000 Mb on a 10/100-only
			 * PHY: unsupported -- leave err = -EIO.
			 */
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg is on: reconstruct the advertisement */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit abilities live in MII_CTRL1000 */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* SerDes: 1000-X abilities live in MII_ADVERTISE */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4586
/* BCM5401 DSP initialization via undocumented auxctl/DSP register
 * writes.  Errors from the individual writes are OR-ed together, so a
 * nonzero return indicates that at least one write failed (the exact
 * error code is not preserved).  Note the two writes to DSP register
 * 0x8006 (0x0132 then 0x0232) appear intentional -- this sequence
 * predates this review.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4605
4606 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4607 {
4608         struct ethtool_eee eee;
4609
4610         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4611                 return true;
4612
4613         tg3_eee_pull_config(tp, &eee);
4614
4615         if (tp->eee.eee_enabled) {
4616                 if (tp->eee.advertised != eee.advertised ||
4617                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4618                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4619                         return false;
4620         } else {
4621                 /* EEE is disabled but we're advertising */
4622                 if (eee.advertised)
4623                         return false;
4624         }
4625
4626         return true;
4627 }
4628
4629 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4630 {
4631         u32 advmsk, tgtadv, advertising;
4632
4633         advertising = tp->link_config.advertising;
4634         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4635
4636         advmsk = ADVERTISE_ALL;
4637         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4638                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4639                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4640         }
4641
4642         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4643                 return false;
4644
4645         if ((*lcladv & advmsk) != tgtadv)
4646                 return false;
4647
4648         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4649                 u32 tg3_ctrl;
4650
4651                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4652
4653                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4654                         return false;
4655
4656                 if (tgtadv &&
4657                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4658                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4659                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4660                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4661                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4662                 } else {
4663                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4664                 }
4665
4666                 if (tg3_ctrl != tgtadv)
4667                         return false;
4668         }
4669
4670         return true;
4671 }
4672
4673 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4674 {
4675         u32 lpeth = 0;
4676
4677         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4678                 u32 val;
4679
4680                 if (tg3_readphy(tp, MII_STAT1000, &val))
4681                         return false;
4682
4683                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4684         }
4685
4686         if (tg3_readphy(tp, MII_LPA, rmtadv))
4687                 return false;
4688
4689         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4690         tp->link_config.rmt_adv = lpeth;
4691
4692         return true;
4693 }
4694
4695 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4696 {
4697         if (curr_link_up != tp->link_up) {
4698                 if (curr_link_up) {
4699                         netif_carrier_on(tp->dev);
4700                 } else {
4701                         netif_carrier_off(tp->dev);
4702                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4703                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4704                 }
4705
4706                 tg3_link_report(tp);
4707                 return true;
4708         }
4709
4710         return false;
4711 }
4712
4713 static void tg3_clear_mac_status(struct tg3 *tp)
4714 {
4715         tw32(MAC_EVENT, 0);
4716
4717         tw32_f(MAC_STATUS,
4718                MAC_STATUS_SYNC_CHANGED |
4719                MAC_STATUS_CFG_CHANGED |
4720                MAC_STATUS_MI_COMPLETION |
4721                MAC_STATUS_LNKSTATE_CHANGED);
4722         udelay(40);
4723 }
4724
/* Program the CPMU Energy Efficient Ethernet registers from tp->eee:
 * link-idle detection control, EEE mode enables (written as 0 when EEE
 * is disabled), and the LPI entry/exit debounce timers.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally needs the APE TX idle qualifier */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Build the EEE mode word; LPI-in-TX only if tx_lpi is enabled */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write 0 (fully off) when EEE is disabled */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4760
4761 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4762 {
4763         bool current_link_up;
4764         u32 bmsr, val;
4765         u32 lcl_adv, rmt_adv;
4766         u32 current_speed;
4767         u8 current_duplex;
4768         int i, err;
4769
4770         tg3_clear_mac_status(tp);
4771
4772         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4773                 tw32_f(MAC_MI_MODE,
4774                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4775                 udelay(80);
4776         }
4777
4778         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4779
4780         /* Some third-party PHYs need to be reset on link going
4781          * down.
4782          */
4783         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4784              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4785              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4786             tp->link_up) {
4787                 tg3_readphy(tp, MII_BMSR, &bmsr);
4788                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4789                     !(bmsr & BMSR_LSTATUS))
4790                         force_reset = true;
4791         }
4792         if (force_reset)
4793                 tg3_phy_reset(tp);
4794
4795         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4796                 tg3_readphy(tp, MII_BMSR, &bmsr);
4797                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4798                     !tg3_flag(tp, INIT_COMPLETE))
4799                         bmsr = 0;
4800
4801                 if (!(bmsr & BMSR_LSTATUS)) {
4802                         err = tg3_init_5401phy_dsp(tp);
4803                         if (err)
4804                                 return err;
4805
4806                         tg3_readphy(tp, MII_BMSR, &bmsr);
4807                         for (i = 0; i < 1000; i++) {
4808                                 udelay(10);
4809                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4810                                     (bmsr & BMSR_LSTATUS)) {
4811                                         udelay(40);
4812                                         break;
4813                                 }
4814                         }
4815
4816                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4817                             TG3_PHY_REV_BCM5401_B0 &&
4818                             !(bmsr & BMSR_LSTATUS) &&
4819                             tp->link_config.active_speed == SPEED_1000) {
4820                                 err = tg3_phy_reset(tp);
4821                                 if (!err)
4822                                         err = tg3_init_5401phy_dsp(tp);
4823                                 if (err)
4824                                         return err;
4825                         }
4826                 }
4827         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4828                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4829                 /* 5701 {A0,B0} CRC bug workaround */
4830                 tg3_writephy(tp, 0x15, 0x0a75);
4831                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4833                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834         }
4835
4836         /* Clear pending interrupts... */
4837         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4839
4840         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4841                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4842         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4843                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4844
4845         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4846             tg3_asic_rev(tp) == ASIC_REV_5701) {
4847                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4848                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4849                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4850                 else
4851                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4852         }
4853
4854         current_link_up = false;
4855         current_speed = SPEED_UNKNOWN;
4856         current_duplex = DUPLEX_UNKNOWN;
4857         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4858         tp->link_config.rmt_adv = 0;
4859
4860         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4861                 err = tg3_phy_auxctl_read(tp,
4862                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4863                                           &val);
4864                 if (!err && !(val & (1 << 10))) {
4865                         tg3_phy_auxctl_write(tp,
4866                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4867                                              val | (1 << 10));
4868                         goto relink;
4869                 }
4870         }
4871
4872         bmsr = 0;
4873         for (i = 0; i < 100; i++) {
4874                 tg3_readphy(tp, MII_BMSR, &bmsr);
4875                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4876                     (bmsr & BMSR_LSTATUS))
4877                         break;
4878                 udelay(40);
4879         }
4880
4881         if (bmsr & BMSR_LSTATUS) {
4882                 u32 aux_stat, bmcr;
4883
4884                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4885                 for (i = 0; i < 2000; i++) {
4886                         udelay(10);
4887                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4888                             aux_stat)
4889                                 break;
4890                 }
4891
4892                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4893                                              &current_speed,
4894                                              &current_duplex);
4895
4896                 bmcr = 0;
4897                 for (i = 0; i < 200; i++) {
4898                         tg3_readphy(tp, MII_BMCR, &bmcr);
4899                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4900                                 continue;
4901                         if (bmcr && bmcr != 0x7fff)
4902                                 break;
4903                         udelay(10);
4904                 }
4905
4906                 lcl_adv = 0;
4907                 rmt_adv = 0;
4908
4909                 tp->link_config.active_speed = current_speed;
4910                 tp->link_config.active_duplex = current_duplex;
4911
4912                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4913                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4914
4915                         if ((bmcr & BMCR_ANENABLE) &&
4916                             eee_config_ok &&
4917                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4918                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4919                                 current_link_up = true;
4920
4921                         /* EEE settings changes take effect only after a phy
4922                          * reset.  If we have skipped a reset due to Link Flap
4923                          * Avoidance being enabled, do it now.
4924                          */
4925                         if (!eee_config_ok &&
4926                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4927                             !force_reset) {
4928                                 tg3_setup_eee(tp);
4929                                 tg3_phy_reset(tp);
4930                         }
4931                 } else {
4932                         if (!(bmcr & BMCR_ANENABLE) &&
4933                             tp->link_config.speed == current_speed &&
4934                             tp->link_config.duplex == current_duplex) {
4935                                 current_link_up = true;
4936                         }
4937                 }
4938
4939                 if (current_link_up &&
4940                     tp->link_config.active_duplex == DUPLEX_FULL) {
4941                         u32 reg, bit;
4942
4943                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4944                                 reg = MII_TG3_FET_GEN_STAT;
4945                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4946                         } else {
4947                                 reg = MII_TG3_EXT_STAT;
4948                                 bit = MII_TG3_EXT_STAT_MDIX;
4949                         }
4950
4951                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4952                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4953
4954                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4955                 }
4956         }
4957
4958 relink:
4959         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4960                 tg3_phy_copper_begin(tp);
4961
4962                 if (tg3_flag(tp, ROBOSWITCH)) {
4963                         current_link_up = true;
4964                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4965                         current_speed = SPEED_1000;
4966                         current_duplex = DUPLEX_FULL;
4967                         tp->link_config.active_speed = current_speed;
4968                         tp->link_config.active_duplex = current_duplex;
4969                 }
4970
4971                 tg3_readphy(tp, MII_BMSR, &bmsr);
4972                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4973                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4974                         current_link_up = true;
4975         }
4976
4977         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4978         if (current_link_up) {
4979                 if (tp->link_config.active_speed == SPEED_100 ||
4980                     tp->link_config.active_speed == SPEED_10)
4981                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4982                 else
4983                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4985                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4986         else
4987                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4988
4989         /* In order for the 5750 core in BCM4785 chip to work properly
4990          * in RGMII mode, the Led Control Register must be set up.
4991          */
4992         if (tg3_flag(tp, RGMII_MODE)) {
4993                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4994                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4995
4996                 if (tp->link_config.active_speed == SPEED_10)
4997                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4998                 else if (tp->link_config.active_speed == SPEED_100)
4999                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000                                      LED_CTRL_100MBPS_ON);
5001                 else if (tp->link_config.active_speed == SPEED_1000)
5002                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003                                      LED_CTRL_1000MBPS_ON);
5004
5005                 tw32(MAC_LED_CTRL, led_ctrl);
5006                 udelay(40);
5007         }
5008
5009         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010         if (tp->link_config.active_duplex == DUPLEX_HALF)
5011                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5012
5013         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5014                 if (current_link_up &&
5015                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5016                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5017                 else
5018                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5019         }
5020
5021         /* ??? Without this setting Netgear GA302T PHY does not
5022          * ??? send/receive packets...
5023          */
5024         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5025             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5026                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5027                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5028                 udelay(80);
5029         }
5030
5031         tw32_f(MAC_MODE, tp->mac_mode);
5032         udelay(40);
5033
5034         tg3_phy_eee_adjust(tp, current_link_up);
5035
5036         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5037                 /* Polled via timer. */
5038                 tw32_f(MAC_EVENT, 0);
5039         } else {
5040                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5041         }
5042         udelay(40);
5043
5044         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5045             current_link_up &&
5046             tp->link_config.active_speed == SPEED_1000 &&
5047             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5048                 udelay(120);
5049                 tw32_f(MAC_STATUS,
5050                      (MAC_STATUS_SYNC_CHANGED |
5051                       MAC_STATUS_CFG_CHANGED));
5052                 udelay(40);
5053                 tg3_write_mem(tp,
5054                               NIC_SRAM_FIRMWARE_MBOX,
5055                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5056         }
5057
5058         /* Prevent send BD corruption. */
5059         if (tg3_flag(tp, CLKREQ_BUG)) {
5060                 if (tp->link_config.active_speed == SPEED_100 ||
5061                     tp->link_config.active_speed == SPEED_10)
5062                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5063                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5064                 else
5065                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5066                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5067         }
5068
5069         tg3_test_and_report_link_chg(tp, current_link_up);
5070
5071         return 0;
5072 }
5073
/* Context for the software 1000BASE-X autonegotiation state machine
 * implemented in tg3_fiber_aneg_smachine() and driven by
 * fiber_autoneg().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* control bits in, result bits out */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps counted in state-machine invocations (cur_time is
	 * incremented once per tg3_fiber_aneg_smachine() call), not in
	 * jiffies.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word sampled from the partner */
	int ability_match_count;	/* consecutive samples of the same word */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words we send / last received (ANEG_CFG_*) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks (state-machine invocations) to let the link settle between states. */
#define ANEG_STATE_SETTLE_TIME  10000
5137
/* Software fiber autonegotiation arbitration state machine, used when
 * the MAC's hardware autoneg engine is not in use.
 *
 * The caller (fiber_autoneg()) invokes this repeatedly; each call
 * samples the received config word from the MAC and advances
 * @ap->state.  Link-partner abilities are reported through MR_* bits
 * in @ap->flags.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while waiting out
 * a settle interval, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the partner's config word.  ability_match is only set
	 * once the same word has been seen more than once in a row,
	 * filtering out transient values.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving: treat the line as idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		fallthrough;
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart autoneg. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		fallthrough;
	case ANEG_STATE_RESTART:
		/* Hold the restart state for the settle time. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise our abilities: full duplex plus the pause
		 * bits derived from the configured flow-control mode.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Keep sending our word, now with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		fallthrough;
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* ACKed word must match (ignoring ACK) what the
			 * partner has been advertising all along.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to zero words: start over. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability bits into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only succeed if neither side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for the line to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5389
5390 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5391 {
5392         int res = 0;
5393         struct tg3_fiber_aneginfo aninfo;
5394         int status = ANEG_FAILED;
5395         unsigned int tick;
5396         u32 tmp;
5397
5398         tw32_f(MAC_TX_AUTO_NEG, 0);
5399
5400         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5401         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5402         udelay(40);
5403
5404         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5405         udelay(40);
5406
5407         memset(&aninfo, 0, sizeof(aninfo));
5408         aninfo.flags |= MR_AN_ENABLE;
5409         aninfo.state = ANEG_STATE_UNKNOWN;
5410         aninfo.cur_time = 0;
5411         tick = 0;
5412         while (++tick < 195000) {
5413                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5414                 if (status == ANEG_DONE || status == ANEG_FAILED)
5415                         break;
5416
5417                 udelay(1);
5418         }
5419
5420         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5421         tw32_f(MAC_MODE, tp->mac_mode);
5422         udelay(40);
5423
5424         *txflags = aninfo.txconfig;
5425         *rxflags = aninfo.flags;
5426
5427         if (status == ANEG_DONE &&
5428             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5429                              MR_LP_ADV_FULL_DUPLEX)))
5430                 res = 1;
5431
5432         return res;
5433 }
5434
/* One-time initialization sequence for the BCM8002 SerDes transceiver.
 *
 * The register numbers and values below are vendor magic with no
 * public documentation; the write ordering and delays are part of the
 * sequence and must not be rearranged.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5484
/* Fiber link setup using the MAC's SG_DIG hardware autonegotiation
 * engine.  @mac_status is a recent MAC_STATUS snapshot from the caller.
 *
 * On all chips except 5704 A0/A1 a MAC_SERDES_CFG workaround value is
 * programmed around autoneg restarts; the 0x00f06fff mask preserves
 * the signal pre-emphasis and voltage regulator bits (see comments
 * below), and the OR-ed magic differs for port A vs port B.
 *
 * Returns true when the link should be considered up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active,
		 * then report link up as soon as PCS is synced.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* A parallel-detected link stays up while the serdes
		 * counter runs down; don't restart autoneg under it.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset into the desired autoneg config. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* HW autoneg finished: derive the pause
			 * advertisements and bring the link up.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: fall back to
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5628
/* Fiber link setup without the hardware autoneg engine.
 *
 * When autoneg is enabled, runs the software state machine via
 * fiber_autoneg() and converts the resulting ANEG_CFG_* / MR_* bits
 * into pause advertisements; otherwise forces a 1000FD link.
 * @mac_status is a recent MAC_STATUS snapshot from the caller.
 *
 * Returns true when the link should be considered up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Clear the latched sync/config-changed bits until they
		 * stay clear (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg found no partner but we are synced and no
		 * config words arrive: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5693
/* tg3_setup_fiber_phy() - (re)establish link on a TBI/SerDes fiber port.
 *
 * Fast path: when HW autoneg is not in use, the device is initialized and
 * the link is already up with a clean PCS-synced/signal-detect status,
 * just ack the change bits and return.  Otherwise reprogram the MAC for
 * TBI mode, run either the hardware or the by-hand autoneg helper, wait
 * for the status-change bits to settle, and report the result.
 *
 * @force_reset is accepted for signature parity with the other
 * tg3_setup_*_phy() helpers; it is not used here.  Always returns 0.
 */
5694 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5695 {
5696         u32 orig_pause_cfg;
5697         u32 orig_active_speed;
5698         u8 orig_active_duplex;
5699         u32 mac_status;
5700         bool current_link_up;
5701         int i;
5702
             /* Snapshot current settings so we only log on a real change. */
5703         orig_pause_cfg = tp->link_config.active_flowctrl;
5704         orig_active_speed = tp->link_config.active_speed;
5705         orig_active_duplex = tp->link_config.active_duplex;
5706
5707         if (!tg3_flag(tp, HW_AUTONEG) &&
5708             tp->link_up &&
5709             tg3_flag(tp, INIT_COMPLETE)) {
5710                 mac_status = tr32(MAC_STATUS);
5711                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5712                                MAC_STATUS_SIGNAL_DET |
5713                                MAC_STATUS_CFG_CHANGED |
5714                                MAC_STATUS_RCVD_CFG);
                     /* Synced + signal detect and nothing changed: link is
                      * healthy, nothing to renegotiate.
                      */
5715                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5716                                    MAC_STATUS_SIGNAL_DET)) {
5717                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5718                                             MAC_STATUS_CFG_CHANGED));
5719                         return 0;
5720                 }
5721         }
5722
5723         tw32_f(MAC_TX_AUTO_NEG, 0);
5724
             /* Put the MAC into TBI (ten-bit interface) fiber mode. */
5725         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5726         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5727         tw32_f(MAC_MODE, tp->mac_mode);
5728         udelay(40);
5729
5730         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5731                 tg3_init_bcm8002(tp);
5732
5733         /* Enable link change event even when serdes polling.  */
5734         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5735         udelay(40);
5736
5737         tp->link_config.rmt_adv = 0;
5738         mac_status = tr32(MAC_STATUS);
5739
5740         if (tg3_flag(tp, HW_AUTONEG))
5741                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5742         else
5743                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5744
             /* Clear the stale link-change bit in the shared status block
              * while keeping the updated flag set.
              */
5745         tp->napi[0].hw_status->status =
5746                 (SD_STATUS_UPDATED |
5747                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5748
             /* Ack status-change bits until they stay clear (bounded). */
5749         for (i = 0; i < 100; i++) {
5750                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5751                                     MAC_STATUS_CFG_CHANGED));
5752                 udelay(5);
5753                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5754                                          MAC_STATUS_CFG_CHANGED |
5755                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5756                         break;
5757         }
5758
5759         mac_status = tr32(MAC_STATUS);
5760         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5761                 current_link_up = false;
                     /* Autoneg idle but no sync: nudge the partner with a
                      * brief SEND_CONFIGS pulse.
                      */
5762                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5763                     tp->serdes_counter == 0) {
5764                         tw32_f(MAC_MODE, (tp->mac_mode |
5765                                           MAC_MODE_SEND_CONFIGS));
5766                         udelay(1);
5767                         tw32_f(MAC_MODE, tp->mac_mode);
5768                 }
5769         }
5770
5771         if (current_link_up) {
5772                 tp->link_config.active_speed = SPEED_1000;
5773                 tp->link_config.active_duplex = DUPLEX_FULL;
5774                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5775                                     LED_CTRL_LNKLED_OVERRIDE |
5776                                     LED_CTRL_1000MBPS_ON));
5777         } else {
5778                 tp->link_config.active_speed = SPEED_UNKNOWN;
5779                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5780                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5781                                     LED_CTRL_LNKLED_OVERRIDE |
5782                                     LED_CTRL_TRAFFIC_OVERRIDE));
5783         }
5784
             /* If the up/down state did not flip, still report a change in
              * pause/speed/duplex configuration.
              */
5785         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5786                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5787                 if (orig_pause_cfg != now_pause_cfg ||
5788                     orig_active_speed != tp->link_config.active_speed ||
5789                     orig_active_duplex != tp->link_config.active_duplex)
5790                         tg3_link_report(tp);
5791         }
5792
5793         return 0;
5794 }
5795
5796 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5797 {
5798         int err = 0;
5799         u32 bmsr, bmcr;
5800         u32 current_speed = SPEED_UNKNOWN;
5801         u8 current_duplex = DUPLEX_UNKNOWN;
5802         bool current_link_up = false;
5803         u32 local_adv, remote_adv, sgsr;
5804
5805         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5806              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5807              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5808              (sgsr & SERDES_TG3_SGMII_MODE)) {
5809
5810                 if (force_reset)
5811                         tg3_phy_reset(tp);
5812
5813                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5814
5815                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5816                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5817                 } else {
5818                         current_link_up = true;
5819                         if (sgsr & SERDES_TG3_SPEED_1000) {
5820                                 current_speed = SPEED_1000;
5821                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5822                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5823                                 current_speed = SPEED_100;
5824                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5825                         } else {
5826                                 current_speed = SPEED_10;
5827                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828                         }
5829
5830                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5831                                 current_duplex = DUPLEX_FULL;
5832                         else
5833                                 current_duplex = DUPLEX_HALF;
5834                 }
5835
5836                 tw32_f(MAC_MODE, tp->mac_mode);
5837                 udelay(40);
5838
5839                 tg3_clear_mac_status(tp);
5840
5841                 goto fiber_setup_done;
5842         }
5843
5844         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5845         tw32_f(MAC_MODE, tp->mac_mode);
5846         udelay(40);
5847
5848         tg3_clear_mac_status(tp);
5849
5850         if (force_reset)
5851                 tg3_phy_reset(tp);
5852
5853         tp->link_config.rmt_adv = 0;
5854
5855         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5857         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5858                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5859                         bmsr |= BMSR_LSTATUS;
5860                 else
5861                         bmsr &= ~BMSR_LSTATUS;
5862         }
5863
5864         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5865
5866         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5867             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5868                 /* do nothing, just check for link up at the end */
5869         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5870                 u32 adv, newadv;
5871
5872                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5873                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5874                                  ADVERTISE_1000XPAUSE |
5875                                  ADVERTISE_1000XPSE_ASYM |
5876                                  ADVERTISE_SLCT);
5877
5878                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5879                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5880
5881                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5882                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5883                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5884                         tg3_writephy(tp, MII_BMCR, bmcr);
5885
5886                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5887                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5888                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5889
5890                         return err;
5891                 }
5892         } else {
5893                 u32 new_bmcr;
5894
5895                 bmcr &= ~BMCR_SPEED1000;
5896                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5897
5898                 if (tp->link_config.duplex == DUPLEX_FULL)
5899                         new_bmcr |= BMCR_FULLDPLX;
5900
5901                 if (new_bmcr != bmcr) {
5902                         /* BMCR_SPEED1000 is a reserved bit that needs
5903                          * to be set on write.
5904                          */
5905                         new_bmcr |= BMCR_SPEED1000;
5906
5907                         /* Force a linkdown */
5908                         if (tp->link_up) {
5909                                 u32 adv;
5910
5911                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5912                                 adv &= ~(ADVERTISE_1000XFULL |
5913                                          ADVERTISE_1000XHALF |
5914                                          ADVERTISE_SLCT);
5915                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5916                                 tg3_writephy(tp, MII_BMCR, bmcr |
5917                                                            BMCR_ANRESTART |
5918                                                            BMCR_ANENABLE);
5919                                 udelay(10);
5920                                 tg3_carrier_off(tp);
5921                         }
5922                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5923                         bmcr = new_bmcr;
5924                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5927                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5928                                         bmsr |= BMSR_LSTATUS;
5929                                 else
5930                                         bmsr &= ~BMSR_LSTATUS;
5931                         }
5932                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5933                 }
5934         }
5935
5936         if (bmsr & BMSR_LSTATUS) {
5937                 current_speed = SPEED_1000;
5938                 current_link_up = true;
5939                 if (bmcr & BMCR_FULLDPLX)
5940                         current_duplex = DUPLEX_FULL;
5941                 else
5942                         current_duplex = DUPLEX_HALF;
5943
5944                 local_adv = 0;
5945                 remote_adv = 0;
5946
5947                 if (bmcr & BMCR_ANENABLE) {
5948                         u32 common;
5949
5950                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952                         common = local_adv & remote_adv;
5953                         if (common & (ADVERTISE_1000XHALF |
5954                                       ADVERTISE_1000XFULL)) {
5955                                 if (common & ADVERTISE_1000XFULL)
5956                                         current_duplex = DUPLEX_FULL;
5957                                 else
5958                                         current_duplex = DUPLEX_HALF;
5959
5960                                 tp->link_config.rmt_adv =
5961                                            mii_adv_to_ethtool_adv_x(remote_adv);
5962                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5963                                 /* Link is up via parallel detect */
5964                         } else {
5965                                 current_link_up = false;
5966                         }
5967                 }
5968         }
5969
5970 fiber_setup_done:
5971         if (current_link_up && current_duplex == DUPLEX_FULL)
5972                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5973
5974         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975         if (tp->link_config.active_duplex == DUPLEX_HALF)
5976                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5977
5978         tw32_f(MAC_MODE, tp->mac_mode);
5979         udelay(40);
5980
5981         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5982
5983         tp->link_config.active_speed = current_speed;
5984         tp->link_config.active_duplex = current_duplex;
5985
5986         tg3_test_and_report_link_chg(tp, current_link_up);
5987         return err;
5988 }
5989
/* tg3_serdes_parallel_detect() - periodic SerDes parallel-detection poll.
 *
 * Called while tp->serdes_counter counts down to give autoneg time to
 * finish.  Afterwards: if the link is down with autoneg enabled and the
 * PHY shows signal detect but no incoming config code words, force a
 * 1000-full link (parallel detect) and mark TG3_PHYFLG_PARALLEL_DETECT.
 * If the link came up via parallel detect and config words later appear,
 * re-enable autoneg and clear the flag.
 */
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5991 {
5992         if (tp->serdes_counter) {
5993                 /* Give autoneg time to complete. */
5994                 tp->serdes_counter--;
5995                 return;
5996         }
5997
5998         if (!tp->link_up &&
5999             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6000                 u32 bmcr;
6001
6002                 tg3_readphy(tp, MII_BMCR, &bmcr);
6003                 if (bmcr & BMCR_ANENABLE) {
6004                         u32 phy1, phy2;
6005
6006                         /* Select shadow register 0x1f */
6007                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6009
6010                         /* Select expansion interrupt status register */
6011                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012                                          MII_TG3_DSP_EXP1_INT_STAT);
                             /* Read twice; the status is latched. */
6013                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015
6016                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017                                 /* We have signal detect and not receiving
6018                                  * config code words, link is up by parallel
6019                                  * detection.
6020                                  */
6021
6022                                 bmcr &= ~BMCR_ANENABLE;
6023                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024                                 tg3_writephy(tp, MII_BMCR, bmcr);
6025                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6026                         }
6027                 }
6028         } else if (tp->link_up &&
6029                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6031                 u32 phy2;
6032
6033                 /* Select expansion interrupt status register */
6034                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035                                  MII_TG3_DSP_EXP1_INT_STAT);
6036                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037                 if (phy2 & 0x20) {
6038                         u32 bmcr;
6039
6040                         /* Config code words received, turn on autoneg. */
6041                         tg3_readphy(tp, MII_BMCR, &bmcr);
6042                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6043
6044                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6045
6046                 }
6047         }
6048 }
6049
/* tg3_setup_phy() - top-level link setup dispatcher.
 *
 * Selects the fiber/SerDes/copper helper based on phy_flags, then applies
 * the link-dependent MAC fixups common to all PHY types: the 5784 A-stage
 * clock prescaler, TX IPG/slot-time lengths, statistics coalescing ticks
 * (pre-5705 parts only) and the ASPM L1 threshold workaround.
 * Returns the error status of the chosen tg3_setup_*_phy() helper.
 */
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6051 {
6052         u32 val;
6053         int err;
6054
6055         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056                 err = tg3_setup_fiber_phy(tp, force_reset);
6057         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6059         else
6060                 err = tg3_setup_copper_phy(tp, force_reset);
6061
             /* 5784 AX: pick a GRC prescaler that matches the current MAC
              * clock frequency.
              */
6062         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6063                 u32 scale;
6064
6065                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6067                         scale = 65;
6068                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6069                         scale = 6;
6070                 else
6071                         scale = 12;
6072
6073                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075                 tw32(GRC_MISC_CFG, val);
6076         }
6077
6078         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079               (6 << TX_LENGTHS_IPG_SHIFT);
             /* 5720/5762 keep extra fields in MAC_TX_LENGTHS; preserve them. */
6080         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081             tg3_asic_rev(tp) == ASIC_REV_5762)
6082                 val |= tr32(MAC_TX_LENGTHS) &
6083                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6085
             /* Longer slot time (0xff) for 1000/half, standard 32 otherwise. */
6086         if (tp->link_config.active_speed == SPEED_1000 &&
6087             tp->link_config.active_duplex == DUPLEX_HALF)
6088                 tw32(MAC_TX_LENGTHS, val |
6089                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6090         else
6091                 tw32(MAC_TX_LENGTHS, val |
6092                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6093
6094         if (!tg3_flag(tp, 5705_PLUS)) {
6095                 if (tp->link_up) {
6096                         tw32(HOSTCC_STAT_COAL_TICKS,
6097                              tp->coal.stats_block_coalesce_usecs);
6098                 } else {
6099                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6100                 }
6101         }
6102
             /* ASPM workaround: relax the L1 entry threshold while the link
              * is down, tighten it while up.
              */
6103         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104                 val = tr32(PCIE_PWR_MGMT_THRESH);
6105                 if (!tp->link_up)
6106                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6107                               tp->pwrmgmt_thresh;
6108                 else
6109                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110                 tw32(PCIE_PWR_MGMT_THRESH, val);
6111         }
6112
6113         return err;
6114 }
6115
6116 /* tp->lock must be held */
     /* Read the 64-bit EAV reference clock.  The LSB read is bracketed by
      * ptp_read_system_prets()/postts() so the PTP core can correlate it
      * with system time.
      * NOTE(review): the LSB-before-MSB order looks latch-sensitive --
      * confirm against the hardware spec before reordering.
      */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6118 {
6119         u64 stamp;
6120
6121         ptp_read_system_prets(sts);
6122         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123         ptp_read_system_postts(sts);
6124         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6125
6126         return stamp;
6127 }
6128
6129 /* tp->lock must be held */
     /* Load @newval into the EAV reference clock: stop the clock, write the
      * LSB then MSB halves, then resume (flushed with tw32_f).
      */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6131 {
6132         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6133
6134         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6138 }
6139
     /* Forward declarations: the full-lock helpers are defined later in
      * this file but are needed by the PTP callbacks below.
      */
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
     /* ethtool .get_ts_info: always report software timestamping; on
      * PTP-capable devices additionally report hardware RX/TX timestamping
      * and the PHC index (or -1 if no clock is registered).  Only
      * HWTSTAMP_FILTER_NONE and the PTP V1/V2 event filters are advertised.
      */
6142 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6143 {
6144         struct tg3 *tp = netdev_priv(dev);
6145
6146         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6147                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6148                                 SOF_TIMESTAMPING_SOFTWARE;
6149
6150         if (tg3_flag(tp, PTP_CAPABLE)) {
6151                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152                                         SOF_TIMESTAMPING_RX_HARDWARE |
6153                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6154         }
6155
6156         if (tp->ptp_clock)
6157                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6158         else
6159                 info->phc_index = -1;
6160
6161         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6162
6163         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6164                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6165                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6166                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6167         return 0;
6168 }
6169
/* PTP .adjfine callback: program the hardware frequency-correction
 * register from a scaled-ppm adjustment.  A zero correction disables the
 * correction engine entirely.  Takes the full lock around the register
 * write.  Returns 0.
 */
6170 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6171 {
6172         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6173         u64 correction;
6174         bool neg_adj;
6175
6176         /* Frequency adjustment is performed using hardware with a 24 bit
6177          * accumulator and a programmable correction value. On each clk, the
6178          * correction value gets added to the accumulator and when it
6179          * overflows, the time counter is incremented/decremented.
6180          */
             /* diff_by_scaled_ppm() scales against the 2^24 accumulator and
              * reports whether the adjustment is negative.
              */
6181         neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6182
6183         tg3_full_lock(tp, 0);
6184
6185         if (correction)
6186                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6187                      TG3_EAV_REF_CLK_CORRECT_EN |
6188                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6189                      ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6190         else
6191                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6192
6193         tg3_full_unlock(tp);
6194
6195         return 0;
6196 }
6197
/* PTP .adjtime callback: apply a time step by accumulating @delta into the
 * software offset tp->ptp_adjust instead of rewriting the hardware clock;
 * readers add the offset on each read.  Returns 0.
 */
6198 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6199 {
6200         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6201
6202         tg3_full_lock(tp, 0);
6203         tp->ptp_adjust += delta;
6204         tg3_full_unlock(tp);
6205
6206         return 0;
6207 }
6208
/* PTP .gettimex64 callback: read the hardware reference clock (with
 * system-time bracketing via @sts), add the software offset and return the
 * result as a timespec64.  Returns 0.
 */
6209 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6210                             struct ptp_system_timestamp *sts)
6211 {
6212         u64 ns;
6213         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214
6215         tg3_full_lock(tp, 0);
6216         ns = tg3_refclk_read(tp, sts);
6217         ns += tp->ptp_adjust;
6218         tg3_full_unlock(tp);
6219
6220         *ts = ns_to_timespec64(ns);
6221
6222         return 0;
6223 }
6224
/* PTP .settime64 callback: load the hardware reference clock with the
 * given time and reset the software offset, since the hardware now holds
 * the absolute value.  Returns 0.
 */
6225 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6226                            const struct timespec64 *ts)
6227 {
6228         u64 ns;
6229         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6230
6231         ns = timespec64_to_ns(ts);
6232
6233         tg3_full_lock(tp, 0);
6234         tg3_refclk_write(tp, ns);
6235         tp->ptp_adjust = 0;
6236         tg3_full_unlock(tp);
6237
6238         return 0;
6239 }
6240
/* PTP .enable callback.  Only PTP_CLK_REQ_PEROUT on index 0 is supported,
 * and only as a one-shot output: the start time is armed in the watchdog 0
 * registers and routed to the TSYNC GPIO; a non-zero period is rejected.
 * Returns 0 on success, -EINVAL/-EOPNOTSUPP on invalid or unsupported
 * requests.
 */
6241 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6242                           struct ptp_clock_request *rq, int on)
6243 {
6244         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6245         u32 clock_ctl;
6246         int rval = 0;
6247
6248         switch (rq->type) {
6249         case PTP_CLK_REQ_PEROUT:
6250                 /* Reject requests with unsupported flags */
6251                 if (rq->perout.flags)
6252                         return -EOPNOTSUPP;
6253
6254                 if (rq->perout.index != 0)
6255                         return -EINVAL;
6256
6257                 tg3_full_lock(tp, 0);
6258                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6259                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6260
6261                 if (on) {
6262                         u64 nsec;
6263
6264                         nsec = rq->perout.start.sec * 1000000000ULL +
6265                                rq->perout.start.nsec;
6266
6267                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6268                                 netdev_warn(tp->dev,
6269                                             "Device supports only a one-shot timesync output, period must be 0\n");
6270                                 rval = -EINVAL;
6271                                 goto err_out;
6272                         }
6273
                             /* The watchdog start value is limited to 63 bits. */
6274                         if (nsec & (1ULL << 63)) {
6275                                 netdev_warn(tp->dev,
6276                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6277                                 rval = -EINVAL;
6278                                 goto err_out;
6279                         }
6280
                             /* Arm watchdog 0 with the start time and route it
                              * to the TSYNC output.
                              */
6281                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6282                         tw32(TG3_EAV_WATCHDOG0_MSB,
6283                              TG3_EAV_WATCHDOG0_EN |
6284                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6285
6286                         tw32(TG3_EAV_REF_CLCK_CTL,
6287                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6288                 } else {
                             /* Disarm the watchdog and restore clock control. */
6289                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6290                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6291                 }
6292
6293 err_out:
6294                 tg3_full_unlock(tp);
6295                 return rval;
6296
6297         default:
6298                 break;
6299         }
6300
6301         return -EOPNOTSUPP;
6302 }
6303
6304 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6305                                      struct skb_shared_hwtstamps *timestamp)
6306 {
6307         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6308         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6309                                            tp->ptp_adjust);
6310 }
6311
/* Fetch the latched 64-bit TX hardware timestamp into *hwclock.
 * NOTE(review): the LSB-before-MSB read order is preserved as-is; it may
 * be latch-sensitive -- confirm before reordering.
 */
6312 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6313 {
6314         *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6315         *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6316 }
6317
/* PTP aux worker: poll for a TX hardware timestamp that was not yet
 * available when the packet completed.  While the latched value still
 * equals the one sampled at transmit time (tp->pre_tx_ts), reschedule in
 * HZ/10; once a fresh value appears, deliver it via skb_tstamp_tx().
 * Gives up once ptp_txts_retrycnt exceeds 2.  In either terminal case the
 * deferred skb is consumed and the retry state reset; returning -1 tells
 * the PTP core not to reschedule.
 */
6318 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6319 {
6320         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6321         struct skb_shared_hwtstamps timestamp;
6322         u64 hwclock;
6323
6324         if (tp->ptp_txts_retrycnt > 2)
6325                 goto done;
6326
6327         tg3_read_tx_tstamp(tp, &hwclock);
6328
6329         if (hwclock != tp->pre_tx_ts) {
6330                 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6331                 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6332                 goto done;
6333         }
6334         tp->ptp_txts_retrycnt++;
6335         return HZ / 10;
6336 done:
6337         dev_consume_skb_any(tp->tx_tstamp_skb);
6338         tp->tx_tstamp_skb = NULL;
6339         tp->ptp_txts_retrycnt = 0;
6340         tp->pre_tx_ts = 0;
6341         return -1;
6342 }
6343
/* PTP clock capabilities template, copied into tp->ptp_info at init time.
 * One periodic-output channel (the one-shot watchdog 0 output), no pins,
 * alarms, external timestamps or PPS.
 */
6344 static const struct ptp_clock_info tg3_ptp_caps = {
6345         .owner          = THIS_MODULE,
6346         .name           = "tg3 clock",
6347         .max_adj        = 250000000,
6348         .n_alarm        = 0,
6349         .n_ext_ts       = 0,
6350         .n_per_out      = 1,
6351         .n_pins         = 0,
6352         .pps            = 0,
6353         .adjfine        = tg3_ptp_adjfine,
6354         .adjtime        = tg3_ptp_adjtime,
6355         .do_aux_work    = tg3_ptp_ts_aux_work,
6356         .gettimex64     = tg3_ptp_gettimex,
6357         .settime64      = tg3_ptp_settime,
6358         .enable         = tg3_ptp_enable,
6359 };
6360
6361 /* tp->lock must be held */
     /* Seed the hardware clock from system real time and install the PTP
      * capability template.  The clock device itself (tp->ptp_clock) is
      * registered elsewhere; see tg3_ptp_fini() for teardown.
      */
6362 static void tg3_ptp_init(struct tg3 *tp)
6363 {
6364         if (!tg3_flag(tp, PTP_CAPABLE))
6365                 return;
6366
6367         /* Initialize the hardware clock to the system time. */
6368         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()))
6369         tp->ptp_adjust = 0;
6370         tp->ptp_info = tg3_ptp_caps;
6371 }
6372
6373 /* tp->lock must be held */
     /* After resume, reload the hardware clock from system real time plus
      * any pending software offset, then clear the offset since it is now
      * folded into the hardware value.
      */
6374 static void tg3_ptp_resume(struct tg3 *tp)
6375 {
6376         if (!tg3_flag(tp, PTP_CAPABLE))
6377                 return;
6378
6379         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6380         tp->ptp_adjust = 0;
6381 }
6382
/* Tear down the PTP clock: unregister the device, reset the software
 * offset and drop any skb still waiting for a deferred TX timestamp.
 * Safe to call when no clock was registered.
 */
6383 static void tg3_ptp_fini(struct tg3 *tp)
6384 {
6385         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6386                 return;
6387
6388         ptp_clock_unregister(tp->ptp_clock);
6389         tp->ptp_clock = NULL;
6390         tp->ptp_adjust = 0;
6391         dev_consume_skb_any(tp->tx_tstamp_skb);
6392         tp->tx_tstamp_skb = NULL;
6393 }
6394
/* Snapshot of tp->irq_sync; nonzero while interrupt handling is being
 * synchronized off (NOTE(review): set/cleared elsewhere -- see the
 * tg3_full_lock()/irq paths).
 */
6395 static inline int tg3_irq_sync(struct tg3 *tp)
6396 {
6397         return tp->irq_sync;
6398 }
6399
/* Copy @len bytes of register space starting at register offset @off into
 * the dump buffer.  @dst is first advanced by @off bytes so each register
 * lands at its own offset within the buffer, mirroring the register map.
 */
6400 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6401 {
6402         int i;
6403
6404         dst = (u32 *)((u8 *)dst + off);
6405         for (i = 0; i < len; i += sizeof(u32))
6406                 *dst++ = tr32(off + i);
6407 }
6408
/* Populate @regs with the register blocks of interest on legacy (non-PCIe)
 * devices for the debug dump.  Each tg3_rd32_loop() call gives a hardware
 * block's base offset and the byte length to capture; optional blocks
 * (MSI-X vectors, TX CPU, NVRAM) are gated on the corresponding device
 * flags.  @regs must be at least TG3_REG_BLK_SIZE bytes.
 */
6409 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6410 {
6411         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6412         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6413         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6414         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6415         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6416         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6417         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6418         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6419         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6420         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6421         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6422         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6423         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6424         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6425         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6426         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6427         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6428         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6429         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6430
6431         if (tg3_flag(tp, SUPPORT_MSIX))
6432                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6433
6434         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6435         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6436         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6437         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6438         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6439         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6440         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6441         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6442
             /* Pre-5705 devices have a dedicated TX CPU as well. */
6443         if (!tg3_flag(tp, 5705_PLUS)) {
6444                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6445                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6446                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6447         }
6448
6449         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6450         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6451         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6452         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6453         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6454
6455         if (tg3_flag(tp, NVRAM))
6456                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6457 }
6458
/* Dump the device register block and the per-vector status-block /
 * NAPI bookkeeping state to the kernel log.  Allocates with
 * GFP_ATOMIC, so it is usable from non-sleepable context.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	/* If it is a PCI error, all registers will be 0xffff,
	 * we don't dump them out, just report the error and return
	 */
	if (tp->pdev->error_state != pci_channel_io_normal) {
		netdev_err(tp->dev, "PCI channel ERROR!\n");
		return;
	}

	/* kzalloc: register slots not filled in below stay zero and are
	 * skipped by the all-zero test in the print loop.
	 */
	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Per-interrupt-vector software state. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6522
6523 /* This is called whenever we suspect that the system chipset is re-
6524  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6525  * is bogus tx completions. We try to recover by setting the
6526  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6527  * in the workqueue.
6528  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the write-reorder workaround is already in effect, or TX
	 * mailbox writes already go through the indirect path, reaching
	 * this recovery path again is presumably a driver bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The actual chip reset happens later, from the workqueue
	 * (see the comment above this function).
	 */
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6542
6543 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6544 {
6545         /* Tell compiler to fetch tx indices from memory. */
6546         barrier();
6547         return tnapi->tx_pending -
6548                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6549 }
6550
6551 /* Tigon3 never reports partial packet sends.  So we do not
6552  * need special logic to handle SKBs that have not had all
6553  * of their frags sent yet, like SunGEM does.
6554  */
/* Reclaim completed TX descriptors for @tnapi: unmap their DMA
 * buffers, account completed bytes/packets, and free (or, for pending
 * hardware timestamps, defer) the skbs.  Walks the ring from the
 * driver's consumer index up to the hardware-reported one.  On an
 * inconsistent ring state it calls tg3_tx_recover() and returns early.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the netdev TX queue numbering is offset by one
	 * relative to the NAPI vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		bool complete_skb_later = false;
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A head descriptor must own an skb; otherwise the ring
		 * state is corrupt.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock;

			/* If the TX timestamp register still holds the
			 * pre-transmit value, the timestamp is not ready
			 * yet: stash the skb and let the PTP worker
			 * complete it later.
			 */
			tg3_read_tx_tstamp(tp, &hwclock);
			if (hwclock != tp->pre_tx_ts) {
				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
				skb_tstamp_tx(skb, &timestamp);
				tp->pre_tx_ts = 0;
			} else {
				tp->tx_tstamp_skb = skb;
				complete_skb_later = true;
			}
		}

		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);

		ri->skb = NULL;

		/* Skip any extra slots consumed when the head mapping
		 * was split into multiple descriptors.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment's descriptor(s). */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag slots must not own an skb and must not
			 * run past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			dma_unmap_page(&tp->pdev->dev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       DMA_TO_DEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		if (!complete_skb_later)
			dev_consume_skb_any(skb);
		else
			ptp_schedule_worker(tp->ptp_clock, 0);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Byte queue limits accounting. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to __tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that __tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX queue lock before waking the queue. */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6661
6662 static void tg3_frag_free(bool is_frag, void *data)
6663 {
6664         if (is_frag)
6665                 skb_free_frag(data);
6666         else
6667                 kfree(data);
6668 }
6669
6670 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6671 {
6672         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6673                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6674
6675         if (!ri->data)
6676                 return;
6677
6678         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6679                          DMA_FROM_DEVICE);
6680         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6681         ri->data = NULL;
6682 }
6683
6684
6685 /* Returns size of skb allocated or < 0 on error.
6686  *
6687  * We only need to fill in the address because the other members
6688  * of the RX descriptor are invariant, see tg3_init_rings.
6689  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6691  * posting buffers we only dirty the first cache line of the RX
6692  * descriptor (containing the address).  Whereas for the RX status
6693  * buffers the cpu only reads the last cacheline of the RX descriptor
6694  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6695  */
/* Allocate and DMA-map a fresh RX buffer and write its address into the
 * descriptor at @dest_idx_unmasked (masked by the ring size) of the
 * ring selected by @opaque_key.
 *
 * *@frag_size is set to the allocation size for page-frag buffers, or
 * to 0 when the buffer came from kmalloc.
 *
 * Returns the mapped data size on success, -EINVAL for an unknown
 * @opaque_key, -ENOMEM on allocation failure, or -EIO on DMA-mapping
 * failure.  On any failure the ring entry is left untouched.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */

	/* Total allocation: aligned payload (plus RX offset) followed by
	 * an aligned skb_shared_info.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		/* Sub-page buffers come from the NAPI page-frag cache. */
		data = napi_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		/* Larger buffers fall back to the slab; *frag_size == 0
		 * signals a slab allocation to the caller.
		 */
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
				 data_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6758
6759 /* We only need to move over in the address because the other
6760  * members of the RX descriptor are invariant.  See notes above
6761  * tg3_alloc_rx_data for full details.
6762  */
/* Recycle an unconsumed RX buffer: copy the buffer pointer and DMA
 * address from slot @src_idx of the vector-0 producer ring set into
 * slot @dest_idx_unmasked (masked by the ring size) of @dpr, then
 * clear the source slot.  See the notes above tg3_alloc_rx_data for
 * why only the address fields need to move.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always the vector-0 producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6808
6809 /* The RX ring scheme is composed of multiple rings which post fresh
6810  * buffers to the chip, and one special ring the chip uses to report
6811  * status back to the host.
6812  *
6813  * The special ring reports the status of received packets to the
6814  * host.  The chip does not write into the original descriptor the
6815  * RX buffer was obtained from.  The chip simply takes the original
6816  * descriptor as provided by the host, updates the status and length
6817  * field, then writes this into the next status ring entry.
6818  *
6819  * Each ring the host uses to post buffers to the chip is described
6820  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6821  * it is first placed into the on-chip ram.  When the packet's length
6822  * is known, it walks down the TG3_BDINFO entries to select the ring.
6823  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6824  * which is within the range of the new packet's length is chosen.
6825  *
6826  * The "separate ring for rx status" scheme may sound queer, but it makes
6827  * sense from a cache coherency perspective.  If only the host writes
6828  * to the buffer post rings, and only the chip writes to the rx status
6829  * rings, then cache lines never move beyond shared-modified state.
6830  * If both the host and chip were to write into the same ring, cache line
6831  * eviction could occur since both entities want it in an exclusive state.
6832  */
/* Service up to @budget packets from this vector's RX return ring and
 * repost/refill the producer rings.  Returns the number of packets
 * passed up the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	/* work_mask accumulates which producer rings had buffers
	 * consumed, so only those get their mailbox kicked below.
	 */
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies the producer ring the
		 * buffer came from and its index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Errored frames: recycle the buffer and count the drop. */
		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tnapi->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Pick up the hardware RX timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large frame: post a replacement buffer, then
			 * hand the original buffer to the stack without
			 * copying.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
					 DMA_FROM_DEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			/* frag_size != 0 means a page-frag allocation,
			 * 0 means slab (see tg3_alloc_rx_data).
			 */
			if (frag_size)
				skb = build_skb(data, frag_size);
			else
				skb = slab_build_skb(data);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer back to the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
						DMA_FROM_DEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
						   len, DMA_FROM_DEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Checksum offload: 0xffff pseudo-header result means
		 * the hardware validated the TCP/UDP checksum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Kick the standard ring mailbox every rx_std_max_post
		 * packets so the chip sees new buffers before we finish
		 * the whole budget.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, vector 1 transfers recycled buffers back to
		 * the hardware rings; schedule it to do the refill.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
7042
/* Service a link-change event signalled through the status block.
 * Does nothing when link changes are detected by other means
 * (USE_LINKCHG_REG or POLL_SERDES).
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the event in the status block before
			 * servicing it.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* With phylib managing the PHY, just
				 * clear the MAC status change bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
7066
/* Transfer recycled RX buffers from the source producer ring set @spr
 * into the destination set @dpr -- standard ring first, then jumbo.
 * Used on the RSS path (see tg3_poll_work) to move buffers recycled by
 * the per-vector rings back into the set the hardware is fed from.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied and only a partial transfer could be made.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Number of entries available without wrapping. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bound by the destination's distance to its wrap. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate the copy at the first destination slot that
		 * still holds a buffer.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the descriptor DMA addresses as well. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same procedure for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
7192
/* Do one round of TX completion and RX processing for @tnapi, bounded
 * by the NAPI @budget.  Returns the updated work_done count; callers
 * treat work_done >= budget as "more work remains".
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() flagged a hardware problem; bail out so the
		 * caller can schedule the reset task.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* This vector has no RX return ring to service. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, napi[1] is responsible for transferring buffers from
	 * the per-vector producer rings back into producer ring 0.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the transferred ring entries visible before the
		 * mailbox writes below advance the producer indices.
		 */
		wmb();

		/* Only poke the hardware mailboxes if an index moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		/* A transfer failed (destination slot still occupied,
		 * see tg3_rx_prodring_xfer()); kick the coalescing
		 * engine so this vector runs again soon and retries.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7241
/* Queue the chip-reset worker unless one is already pending; the
 * atomic test_and_set_bit() makes the check-and-schedule race free so
 * the work is never queued twice.
 */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
7247
/* Cancel a pending reset task, waiting for it to finish if it is
 * already running, and unconditionally clear the TX recovery flag.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7254
/* NAPI poll callback for the additional MSI-X vectors.  These run in
 * tagged-status mode: the tag written back through the interrupt
 * mailbox tells the hardware how far processing has progressed.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* TX error detected; hand off to the reset task. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7314
7315 static void tg3_process_error(struct tg3 *tp)
7316 {
7317         u32 val;
7318         bool real_error = false;
7319
7320         if (tg3_flag(tp, ERROR_PROCESSED))
7321                 return;
7322
7323         /* Check Flow Attention register */
7324         val = tr32(HOSTCC_FLOW_ATTN);
7325         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7326                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7327                 real_error = true;
7328         }
7329
7330         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7331                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7332                 real_error = true;
7333         }
7334
7335         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7336                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7337                 real_error = true;
7338         }
7339
7340         if (!real_error)
7341                 return;
7342
7343         tg3_dump_state(tp);
7344
7345         tg3_flag_set(tp, ERROR_PROCESSED);
7346         tg3_reset_task_schedule(tp);
7347 }
7348
/* NAPI poll callback for tp->napi[0] (the only vector in INTx/MSI
 * mode).  Besides TX/RX work it also services link change events and
 * chip error status before re-enabling interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* TX error detected; hand off to the reset task. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7397
7398 static void tg3_napi_disable(struct tg3 *tp)
7399 {
7400         int i;
7401
7402         for (i = tp->irq_cnt - 1; i >= 0; i--)
7403                 napi_disable(&tp->napi[i].napi);
7404 }
7405
7406 static void tg3_napi_enable(struct tg3 *tp)
7407 {
7408         int i;
7409
7410         for (i = 0; i < tp->irq_cnt; i++)
7411                 napi_enable(&tp->napi[i].napi);
7412 }
7413
7414 static void tg3_napi_init(struct tg3 *tp)
7415 {
7416         int i;
7417
7418         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7419         for (i = 1; i < tp->irq_cnt; i++)
7420                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7421 }
7422
7423 static void tg3_napi_fini(struct tg3 *tp)
7424 {
7425         int i;
7426
7427         for (i = 0; i < tp->irq_cnt; i++)
7428                 netif_napi_del(&tp->napi[i].napi);
7429 }
7430
/* Quiesce the data path: refresh the TX timestamp so the watchdog does
 * not fire while we are stopped, then disable NAPI, carrier and the TX
 * queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7438
/* Restart the data path after tg3_netif_stop().
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so any pending events are
	 * noticed once interrupts are re-enabled (the INTx handler
	 * keys off SD_STATUS_UPDATED).
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7457
/* Wait for all in-flight interrupt handlers to finish.  Setting
 * tp->irq_sync first makes the handlers skip scheduling NAPI (they
 * test tg3_irq_sync()); tp->lock is dropped across synchronize_irq()
 * so the handlers can complete.  Called with tp->lock held; returns
 * with it re-acquired.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting on the handlers. */
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
7476
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Note that tg3_irq_quiesce() temporarily drops tp->lock while it
 * waits for the handlers, then re-acquires it.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7488
/* Release tp->lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7493
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Don't schedule NAPI while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7511
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll loop will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Don't schedule NAPI while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7537
/* INTx interrupt handler for non-tagged status mode.  The line may be
 * shared with other devices, so the handler must determine whether the
 * interrupt is really ours before claiming it.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7586
/* INTx interrupt handler for chips using tagged status blocks.  An
 * unchanged status_tag means nothing new has happened since the last
 * interrupt, which is how screaming shared interrupts are detected and
 * reported as unhandled.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7638
7639 /* ISR for interrupt test */
7640 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7641 {
7642         struct tg3_napi *tnapi = dev_id;
7643         struct tg3 *tp = tnapi->tp;
7644         struct tg3_hw_status *sblk = tnapi->hw_status;
7645
7646         if ((sblk->status & SD_STATUS_UPDATED) ||
7647             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7648                 tg3_disable_ints(tp);
7649                 return IRQ_RETVAL(1);
7650         }
7651         return IRQ_RETVAL(0);
7652 }
7653
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: simulate an interrupt on every vector so pending work
 * is processed even with real interrupts unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	/* Leave the hardware alone while an IRQ quiesce is in progress. */
	if (tg3_irq_sync(tp))
		return;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
7667
/* ndo_tx_timeout handler: the stack believes a TX queue is stuck, so
 * optionally log chip state and schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7679
7680 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7681 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7682 {
7683         u32 base = (u32) mapping & 0xffffffff;
7684
7685         return base + len + 8 < base;
7686 }
7687
7688 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7689  * of any 4GB boundaries: 4G, 8G, etc
7690  */
7691 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7692                                            u32 len, u32 mss)
7693 {
7694         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7695                 u32 base = (u32) mapping & 0xffffffff;
7696
7697                 return ((base + len + (mss & 0x3fff)) < base);
7698         }
7699         return 0;
7700 }
7701
7702 /* Test for DMA addresses > 40-bit */
7703 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7704                                           int len)
7705 {
7706 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7707         if (tg3_flag(tp, 40BIT_DMA_BUG))
7708                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7709         return 0;
7710 #else
7711         return 0;
7712 #endif
7713 }
7714
7715 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7716                                  dma_addr_t mapping, u32 len, u32 flags,
7717                                  u32 mss, u32 vlan)
7718 {
7719         txbd->addr_hi = ((u64) mapping >> 32);
7720         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7721         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7722         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7723 }
7724
/* Emit TX descriptor(s) for one buffer, splitting it when the chip's
 * DMA length limit requires.  *entry is advanced past the descriptors
 * used and *budget is decremented per descriptor consumed.  Returns
 * true if a hardware DMA bug condition was hit, in which case the
 * caller must fall back to the workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Buffers of 8 bytes or less trip a DMA erratum on some chips. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	/* Split buffers longer than dma_limit into multiple descriptors,
	 * each marked 'fragmented' except the last piece.
	 */
	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final piece carries the caller's original
				 * flags (including TXD_FLAG_END, if set).
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: undo the 'fragmented'
				 * mark on the last piece that was emitted.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7787
/* Unmap the DMA mappings of the skb whose descriptors start at ring
 * slot @entry: first the linear head, then @last + 1 page fragments.
 * Extra descriptors inserted by tg3_tx_frag_set() for split buffers
 * (marked 'fragmented') are stepped over after each piece.  The skb is
 * detached from the ring but not freed; the caller owns it afterwards.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear portion of the skb. */
	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);

	/* Step over any split-buffer descriptors for the head. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		dma_unmap_page(&tnapi->tp->pdev->dev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), DMA_TO_DEVICE);

		/* Step over split-buffer descriptors for this frag too. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7823
/* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Copy @skb into a freshly allocated linear skb (hoping the new buffer
 * avoids the offending DMA range), map it, and emit descriptors via
 * tg3_tx_frag_set().  Returns 0 on success, -1 on failure.  The
 * original skb is consumed in all cases and *pskb is updated to point
 * at the new skb (NULL if allocation failed).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 only: grow the headroom so the copied data can be
		 * placed at a 4-byte-aligned offset.  NOTE(review): exact
		 * alignment requirement presumed from this ASIC check —
		 * confirm against 5701 errata.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
					  new_skb->len, DMA_TO_DEVICE);
		/* Make sure the mapping succeeded */
		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* If even the linear copy trips a DMA bug, give
			 * up: unmap and free the copy.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
7878
7879 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7880 {
7881         /* Check if we will never have enough descriptors,
7882          * as gso_segs can be more than current ring size
7883          */
7884         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7885 }
7886
static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set(): segment the packet in software and
 * transmit each resulting segment individually.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	/* Worst case: ~3 descriptors per resulting segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
	struct sk_buff *segs, *seg, *next;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	/* Segment in software with the TSO features masked off. */
	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs) {
		tnapi->tx_dropped++;
		goto tg3_tso_bug_end;
	}

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		__tg3_start_xmit(seg, tp->dev);
	}

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
7931
7932 /* hard_start_xmit for all devices */
7933 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7934 {
7935         struct tg3 *tp = netdev_priv(dev);
7936         u32 len, entry, base_flags, mss, vlan = 0;
7937         u32 budget;
7938         int i = -1, would_hit_hwbug;
7939         dma_addr_t mapping;
7940         struct tg3_napi *tnapi;
7941         struct netdev_queue *txq;
7942         unsigned int last;
7943         struct iphdr *iph = NULL;
7944         struct tcphdr *tcph = NULL;
7945         __sum16 tcp_csum = 0, ip_csum = 0;
7946         __be16 ip_tot_len = 0;
7947
7948         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7949         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7950         if (tg3_flag(tp, ENABLE_TSS))
7951                 tnapi++;
7952
7953         budget = tg3_tx_avail(tnapi);
7954
7955         /* We are running in BH disabled context with netif_tx_lock
7956          * and TX reclaim runs via tp->napi.poll inside of a software
7957          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7958          * no IRQ context deadlocks to worry about either.  Rejoice!
7959          */
7960         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7961                 if (!netif_tx_queue_stopped(txq)) {
7962                         netif_tx_stop_queue(txq);
7963
7964                         /* This is a hard error, log it. */
7965                         netdev_err(dev,
7966                                    "BUG! Tx Ring full when queue awake!\n");
7967                 }
7968                 return NETDEV_TX_BUSY;
7969         }
7970
7971         entry = tnapi->tx_prod;
7972         base_flags = 0;
7973
7974         mss = skb_shinfo(skb)->gso_size;
7975         if (mss) {
7976                 u32 tcp_opt_len, hdr_len;
7977
7978                 if (skb_cow_head(skb, 0))
7979                         goto drop;
7980
7981                 iph = ip_hdr(skb);
7982                 tcp_opt_len = tcp_optlen(skb);
7983
7984                 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7985
7986                 /* HW/FW can not correctly segment packets that have been
7987                  * vlan encapsulated.
7988                  */
7989                 if (skb->protocol == htons(ETH_P_8021Q) ||
7990                     skb->protocol == htons(ETH_P_8021AD)) {
7991                         if (tg3_tso_bug_gso_check(tnapi, skb))
7992                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7993                         goto drop;
7994                 }
7995
7996                 if (!skb_is_gso_v6(skb)) {
7997                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7998                             tg3_flag(tp, TSO_BUG)) {
7999                                 if (tg3_tso_bug_gso_check(tnapi, skb))
8000                                         return tg3_tso_bug(tp, tnapi, txq, skb);
8001                                 goto drop;
8002                         }
8003                         ip_csum = iph->check;
8004                         ip_tot_len = iph->tot_len;
8005                         iph->check = 0;
8006                         iph->tot_len = htons(mss + hdr_len);
8007                 }
8008
8009                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8010                                TXD_FLAG_CPU_POST_DMA);
8011
8012                 tcph = tcp_hdr(skb);
8013                 tcp_csum = tcph->check;
8014
8015                 if (tg3_flag(tp, HW_TSO_1) ||
8016                     tg3_flag(tp, HW_TSO_2) ||
8017                     tg3_flag(tp, HW_TSO_3)) {
8018                         tcph->check = 0;
8019                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8020                 } else {
8021                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8022                                                          0, IPPROTO_TCP, 0);
8023                 }
8024
8025                 if (tg3_flag(tp, HW_TSO_3)) {
8026                         mss |= (hdr_len & 0xc) << 12;
8027                         if (hdr_len & 0x10)
8028                                 base_flags |= 0x00000010;
8029                         base_flags |= (hdr_len & 0x3e0) << 5;
8030                 } else if (tg3_flag(tp, HW_TSO_2))
8031                         mss |= hdr_len << 9;
8032                 else if (tg3_flag(tp, HW_TSO_1) ||
8033                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8034                         if (tcp_opt_len || iph->ihl > 5) {
8035                                 int tsflags;
8036
8037                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8038                                 mss |= (tsflags << 11);
8039                         }
8040                 } else {
8041                         if (tcp_opt_len || iph->ihl > 5) {
8042                                 int tsflags;
8043
8044                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8045                                 base_flags |= tsflags << 12;
8046                         }
8047                 }
8048         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8049                 /* HW/FW can not correctly checksum packets that have been
8050                  * vlan encapsulated.
8051                  */
8052                 if (skb->protocol == htons(ETH_P_8021Q) ||
8053                     skb->protocol == htons(ETH_P_8021AD)) {
8054                         if (skb_checksum_help(skb))
8055                                 goto drop;
8056                 } else  {
8057                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8058                 }
8059         }
8060
8061         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8062             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8063                 base_flags |= TXD_FLAG_JMB_PKT;
8064
8065         if (skb_vlan_tag_present(skb)) {
8066                 base_flags |= TXD_FLAG_VLAN;
8067                 vlan = skb_vlan_tag_get(skb);
8068         }
8069
8070         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8071             tg3_flag(tp, TX_TSTAMP_EN)) {
8072                 tg3_full_lock(tp, 0);
8073                 if (!tp->pre_tx_ts) {
8074                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8075                         base_flags |= TXD_FLAG_HWTSTAMP;
8076                         tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8077                 }
8078                 tg3_full_unlock(tp);
8079         }
8080
8081         len = skb_headlen(skb);
8082
8083         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8084                                  DMA_TO_DEVICE);
8085         if (dma_mapping_error(&tp->pdev->dev, mapping))
8086                 goto drop;
8087
8088
8089         tnapi->tx_buffers[entry].skb = skb;
8090         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8091
8092         would_hit_hwbug = 0;
8093
8094         if (tg3_flag(tp, 5701_DMA_BUG))
8095                 would_hit_hwbug = 1;
8096
8097         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8098                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8099                             mss, vlan)) {
8100                 would_hit_hwbug = 1;
8101         } else if (skb_shinfo(skb)->nr_frags > 0) {
8102                 u32 tmp_mss = mss;
8103
8104                 if (!tg3_flag(tp, HW_TSO_1) &&
8105                     !tg3_flag(tp, HW_TSO_2) &&
8106                     !tg3_flag(tp, HW_TSO_3))
8107                         tmp_mss = 0;
8108
8109                 /* Now loop through additional data
8110                  * fragments, and queue them.
8111                  */
8112                 last = skb_shinfo(skb)->nr_frags - 1;
8113                 for (i = 0; i <= last; i++) {
8114                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8115
8116                         len = skb_frag_size(frag);
8117                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8118                                                    len, DMA_TO_DEVICE);
8119
8120                         tnapi->tx_buffers[entry].skb = NULL;
8121                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8122                                            mapping);
8123                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8124                                 goto dma_error;
8125
8126                         if (!budget ||
8127                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8128                                             len, base_flags |
8129                                             ((i == last) ? TXD_FLAG_END : 0),
8130                                             tmp_mss, vlan)) {
8131                                 would_hit_hwbug = 1;
8132                                 break;
8133                         }
8134                 }
8135         }
8136
8137         if (would_hit_hwbug) {
8138                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8139
8140                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8141                         /* If it's a TSO packet, do GSO instead of
8142                          * allocating and copying to a large linear SKB
8143                          */
8144                         if (ip_tot_len) {
8145                                 iph->check = ip_csum;
8146                                 iph->tot_len = ip_tot_len;
8147                         }
8148                         tcph->check = tcp_csum;
8149                         return tg3_tso_bug(tp, tnapi, txq, skb);
8150                 }
8151
8152                 /* If the workaround fails due to memory/mapping
8153                  * failure, silently drop this packet.
8154                  */
8155                 entry = tnapi->tx_prod;
8156                 budget = tg3_tx_avail(tnapi);
8157                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8158                                                 base_flags, mss, vlan))
8159                         goto drop_nofree;
8160         }
8161
8162         skb_tx_timestamp(skb);
8163         netdev_tx_sent_queue(txq, skb->len);
8164
8165         /* Sync BD data before updating mailbox */
8166         wmb();
8167
8168         tnapi->tx_prod = entry;
8169         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8170                 netif_tx_stop_queue(txq);
8171
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
8174                  * tg3_tx(), we update tx index before checking for
8175                  * netif_tx_queue_stopped().
8176                  */
8177                 smp_mb();
8178                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8179                         netif_tx_wake_queue(txq);
8180         }
8181
8182         return NETDEV_TX_OK;
8183
8184 dma_error:
8185         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8186         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8187 drop:
8188         dev_kfree_skb_any(skb);
8189 drop_nofree:
8190         tnapi->tx_dropped++;
8191         return NETDEV_TX_OK;
8192 }
8193
8194 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8195 {
8196         struct netdev_queue *txq;
8197         u16 skb_queue_mapping;
8198         netdev_tx_t ret;
8199
8200         skb_queue_mapping = skb_get_queue_mapping(skb);
8201         txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8202
8203         ret = __tg3_start_xmit(skb, dev);
8204
8205         /* Notify the hardware that packets are ready by updating the TX ring
8206          * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8207          * the hardware for every packet. To guarantee forward progress the TX
8208          * ring must be drained when it is full as indicated by
8209          * netif_xmit_stopped(). This needs to happen even when the current
8210          * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8211          * queued by previous __tg3_start_xmit() calls might get stuck in
8212          * the queue forever.
8213          */
8214         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8215                 struct tg3_napi *tnapi;
8216                 struct tg3 *tp;
8217
8218                 tp = netdev_priv(dev);
8219                 tnapi = &tp->napi[skb_queue_mapping];
8220
8221                 if (tg3_flag(tp, ENABLE_TSS))
8222                         tnapi++;
8223
8224                 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8225         }
8226
8227         return ret;
8228 }
8229
8230 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8231 {
8232         if (enable) {
8233                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8234                                   MAC_MODE_PORT_MODE_MASK);
8235
8236                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8237
8238                 if (!tg3_flag(tp, 5705_PLUS))
8239                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8240
8241                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8242                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8243                 else
8244                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8245         } else {
8246                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8247
8248                 if (tg3_flag(tp, 5705_PLUS) ||
8249                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8250                     tg3_asic_rev(tp) == ASIC_REV_5700)
8251                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8252         }
8253
8254         tw32(MAC_MODE, tp->mac_mode);
8255         udelay(40);
8256 }
8257
/* Configure the PHY for loopback at @speed.  When @extlpbk is true the
 * external loopback path is set up via tg3_phy_set_extloopbk(); otherwise
 * the PHY's internal loopback (BMCR_LOOPBACK) is used.  Returns 0 on
 * success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* Turn off APD and auto-MDIX while in loopback. */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100 Mbit; clamp the requested speed. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force the PHY into the master role for the
			 * external loopback link.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link-polarity quirks for 5401/5411 PHYs on 5700. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8350
8351 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8352 {
8353         struct tg3 *tp = netdev_priv(dev);
8354
8355         if (features & NETIF_F_LOOPBACK) {
8356                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8357                         return;
8358
8359                 spin_lock_bh(&tp->lock);
8360                 tg3_mac_loopback(tp, true);
8361                 netif_carrier_on(tp->dev);
8362                 spin_unlock_bh(&tp->lock);
8363                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8364         } else {
8365                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8366                         return;
8367
8368                 spin_lock_bh(&tp->lock);
8369                 tg3_mac_loopback(tp, false);
8370                 /* Force link status check */
8371                 tg3_setup_phy(tp, true);
8372                 spin_unlock_bh(&tp->lock);
8373                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8374         }
8375 }
8376
8377 static netdev_features_t tg3_fix_features(struct net_device *dev,
8378         netdev_features_t features)
8379 {
8380         struct tg3 *tp = netdev_priv(dev);
8381
8382         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8383                 features &= ~NETIF_F_ALL_TSO;
8384
8385         return features;
8386 }
8387
8388 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8389 {
8390         netdev_features_t changed = dev->features ^ features;
8391
8392         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8393                 tg3_set_loopback(dev, features);
8394
8395         return 0;
8396 }
8397
/* Release all rx data buffers held by @tpr.
 *
 * For a per-vector prodring (anything other than napi[0]'s) only the
 * slots in the [consumer, producer) window are populated, so just that
 * window is walked, wrapping with the ring mask.  For napi[0]'s ring —
 * the true hardware prodring — every slot may hold a buffer, so the
 * whole ring is swept.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Free only the occupied window, wrapping at the mask. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* napi[0]: sweep every slot of the standard ring. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* Jumbo buffers exist only on jumbo-capable, non-5780-class parts
	 * (matches the allocation condition in tg3_rx_prodring_alloc()).
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
8431
8432 /* Initialize rx rings for packet processing.
8433  *
8434  * The chip has been shut down and the driver detached from
8435  * the networking, so no interrupts or new tx packets will
8436  * end up in the driver.  tp->{tx,}lock are held and thus
8437  * we may not sleep.
8438  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector prodrings (not napi[0]'s) have no descriptor rings of
	 * their own; clearing their buffer bookkeeping arrays is all that
	 * is needed.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class parts carry jumbo MTUs on the standard ring, so size
	 * the standard buffers for jumbo DMA in that case.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		/* On partial failure, shrink the ring to what was actually
		 * allocated rather than failing the whole init — unless
		 * nothing at all could be allocated.
		 */
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	/* Only jumbo-capable, non-5780-class parts have a separate
	 * jumbo producer ring.
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Same shrink-on-partial-failure policy as the standard ring. */
	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8540
8541 static void tg3_rx_prodring_fini(struct tg3 *tp,
8542                                  struct tg3_rx_prodring_set *tpr)
8543 {
8544         kfree(tpr->rx_std_buffers);
8545         tpr->rx_std_buffers = NULL;
8546         kfree(tpr->rx_jmb_buffers);
8547         tpr->rx_jmb_buffers = NULL;
8548         if (tpr->rx_std) {
8549                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8550                                   tpr->rx_std, tpr->rx_std_mapping);
8551                 tpr->rx_std = NULL;
8552         }
8553         if (tpr->rx_jmb) {
8554                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8555                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8556                 tpr->rx_jmb = NULL;
8557         }
8558 }
8559
8560 static int tg3_rx_prodring_init(struct tg3 *tp,
8561                                 struct tg3_rx_prodring_set *tpr)
8562 {
8563         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8564                                       GFP_KERNEL);
8565         if (!tpr->rx_std_buffers)
8566                 return -ENOMEM;
8567
8568         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8569                                          TG3_RX_STD_RING_BYTES(tp),
8570                                          &tpr->rx_std_mapping,
8571                                          GFP_KERNEL);
8572         if (!tpr->rx_std)
8573                 goto err_out;
8574
8575         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8576                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8577                                               GFP_KERNEL);
8578                 if (!tpr->rx_jmb_buffers)
8579                         goto err_out;
8580
8581                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8582                                                  TG3_RX_JMB_RING_BYTES(tp),
8583                                                  &tpr->rx_jmb_mapping,
8584                                                  GFP_KERNEL);
8585                 if (!tpr->rx_jmb)
8586                         goto err_out;
8587         }
8588
8589         return 0;
8590
8591 err_out:
8592         tg3_rx_prodring_fini(tp, tpr);
8593         return -ENOMEM;
8594 }
8595
8596 /* Free up pending packets in all rx/tx rings.
8597  *
8598  * The chip has been shut down and the driver detached from
8599  * the networking, so no interrupts or new tx packets will
8600  * end up in the driver.  tp->{tx,}lock is not held and we are not
8601  * in an interrupt context and thus may sleep.
8602  */
8603 static void tg3_free_rings(struct tg3 *tp)
8604 {
8605         int i, j;
8606
8607         for (j = 0; j < tp->irq_cnt; j++) {
8608                 struct tg3_napi *tnapi = &tp->napi[j];
8609
8610                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8611
8612                 if (!tnapi->tx_buffers)
8613                         continue;
8614
8615                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8616                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8617
8618                         if (!skb)
8619                                 continue;
8620
8621                         tg3_tx_skb_unmap(tnapi, i,
8622                                          skb_shinfo(skb)->nr_frags - 1);
8623
8624                         dev_consume_skb_any(skb);
8625                 }
8626                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8627         }
8628 }
8629
8630 /* Initialize tx/rx rings for packet processing.
8631  *
8632  * The chip has been shut down and the driver detached from
8633  * the networking, so no interrupts or new tx packets will
8634  * end up in the driver.  tp->{tx,}lock are held and thus
8635  * we may not sleep.
8636  */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		/* NOTE(review): the memset below zeroes the whole status
		 * block, covering the two fields cleared explicitly above.
		 */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		/* Repopulate the producer ring; on failure undo all
		 * vectors initialized so far.
		 */
		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
8671
8672 static void tg3_mem_tx_release(struct tg3 *tp)
8673 {
8674         int i;
8675
8676         for (i = 0; i < tp->irq_max; i++) {
8677                 struct tg3_napi *tnapi = &tp->napi[i];
8678
8679                 if (tnapi->tx_ring) {
8680                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8681                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8682                         tnapi->tx_ring = NULL;
8683                 }
8684
8685                 kfree(tnapi->tx_buffers);
8686                 tnapi->tx_buffers = NULL;
8687         }
8688 }
8689
8690 static int tg3_mem_tx_acquire(struct tg3 *tp)
8691 {
8692         int i;
8693         struct tg3_napi *tnapi = &tp->napi[0];
8694
8695         /* If multivector TSS is enabled, vector 0 does not handle
8696          * tx interrupts.  Don't allocate any resources for it.
8697          */
8698         if (tg3_flag(tp, ENABLE_TSS))
8699                 tnapi++;
8700
8701         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8702                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8703                                             sizeof(struct tg3_tx_ring_info),
8704                                             GFP_KERNEL);
8705                 if (!tnapi->tx_buffers)
8706                         goto err_out;
8707
8708                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8709                                                     TG3_TX_RING_BYTES,
8710                                                     &tnapi->tx_desc_mapping,
8711                                                     GFP_KERNEL);
8712                 if (!tnapi->tx_ring)
8713                         goto err_out;
8714         }
8715
8716         return 0;
8717
8718 err_out:
8719         tg3_mem_tx_release(tp);
8720         return -ENOMEM;
8721 }
8722
8723 static void tg3_mem_rx_release(struct tg3 *tp)
8724 {
8725         int i;
8726
8727         for (i = 0; i < tp->irq_max; i++) {
8728                 struct tg3_napi *tnapi = &tp->napi[i];
8729
8730                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8731
8732                 if (!tnapi->rx_rcb)
8733                         continue;
8734
8735                 dma_free_coherent(&tp->pdev->dev,
8736                                   TG3_RX_RCB_RING_BYTES(tp),
8737                                   tnapi->rx_rcb,
8738                                   tnapi->rx_rcb_mapping);
8739                 tnapi->rx_rcb = NULL;
8740         }
8741 }
8742
8743 static int tg3_mem_rx_acquire(struct tg3 *tp)
8744 {
8745         unsigned int i, limit;
8746
8747         limit = tp->rxq_cnt;
8748
8749         /* If RSS is enabled, we need a (dummy) producer ring
8750          * set on vector zero.  This is the true hw prodring.
8751          */
8752         if (tg3_flag(tp, ENABLE_RSS))
8753                 limit++;
8754
8755         for (i = 0; i < limit; i++) {
8756                 struct tg3_napi *tnapi = &tp->napi[i];
8757
8758                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8759                         goto err_out;
8760
8761                 /* If multivector RSS is enabled, vector 0
8762                  * does not handle rx or tx interrupts.
8763                  * Don't allocate any resources for it.
8764                  */
8765                 if (!i && tg3_flag(tp, ENABLE_RSS))
8766                         continue;
8767
8768                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8769                                                    TG3_RX_RCB_RING_BYTES(tp),
8770                                                    &tnapi->rx_rcb_mapping,
8771                                                    GFP_KERNEL);
8772                 if (!tnapi->rx_rcb)
8773                         goto err_out;
8774         }
8775
8776         return 0;
8777
8778 err_out:
8779         tg3_mem_rx_release(tp);
8780         return -ENOMEM;
8781 }
8782
8783 /*
8784  * Must not be invoked with interrupt sources disabled and
8785  * the hardware shutdown down.
8786  */
8787 static void tg3_free_consistent(struct tg3 *tp)
8788 {
8789         int i;
8790
8791         for (i = 0; i < tp->irq_cnt; i++) {
8792                 struct tg3_napi *tnapi = &tp->napi[i];
8793
8794                 if (tnapi->hw_status) {
8795                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8796                                           tnapi->hw_status,
8797                                           tnapi->status_mapping);
8798                         tnapi->hw_status = NULL;
8799                 }
8800         }
8801
8802         tg3_mem_rx_release(tp);
8803         tg3_mem_tx_release(tp);
8804
8805         /* tp->hw_stats can be referenced safely:
8806          *     1. under rtnl_lock
8807          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8808          */
8809         if (tp->hw_stats) {
8810                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8811                                   tp->hw_stats, tp->stats_mapping);
8812                 tp->hw_stats = NULL;
8813         }
8814 }
8815
8816 /*
8817  * Must not be invoked with interrupt sources disabled and
8818  * the hardware shutdown down.  Can sleep.
8819  */
8820 static int tg3_alloc_consistent(struct tg3 *tp)
8821 {
8822         int i;
8823
8824         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8825                                           sizeof(struct tg3_hw_stats),
8826                                           &tp->stats_mapping, GFP_KERNEL);
8827         if (!tp->hw_stats)
8828                 goto err_out;
8829
8830         for (i = 0; i < tp->irq_cnt; i++) {
8831                 struct tg3_napi *tnapi = &tp->napi[i];
8832                 struct tg3_hw_status *sblk;
8833
8834                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8835                                                       TG3_HW_STATUS_SIZE,
8836                                                       &tnapi->status_mapping,
8837                                                       GFP_KERNEL);
8838                 if (!tnapi->hw_status)
8839                         goto err_out;
8840
8841                 sblk = tnapi->hw_status;
8842
8843                 if (tg3_flag(tp, ENABLE_RSS)) {
8844                         u16 *prodptr = NULL;
8845
8846                         /*
8847                          * When RSS is enabled, the status block format changes
8848                          * slightly.  The "rx_jumbo_consumer", "reserved",
8849                          * and "rx_mini_consumer" members get mapped to the
8850                          * other three rx return ring producer indexes.
8851                          */
8852                         switch (i) {
8853                         case 1:
8854                                 prodptr = &sblk->idx[0].rx_producer;
8855                                 break;
8856                         case 2:
8857                                 prodptr = &sblk->rx_jumbo_consumer;
8858                                 break;
8859                         case 3:
8860                                 prodptr = &sblk->reserved;
8861                                 break;
8862                         case 4:
8863                                 prodptr = &sblk->rx_mini_consumer;
8864                                 break;
8865                         }
8866                         tnapi->rx_rcb_prod_idx = prodptr;
8867                 } else {
8868                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8869                 }
8870         }
8871
8872         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8873                 goto err_out;
8874
8875         return 0;
8876
8877 err_out:
8878         tg3_free_consistent(tp);
8879         return -ENOMEM;
8880 }
8881
8882 #define MAX_WAIT_CNT 1000
8883
8884 /* To stop a block, clear the enable bit and poll till it
8885  * clears.  tp->lock is held.
8886  */
8887 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8888 {
8889         unsigned int i;
8890         u32 val;
8891
8892         if (tg3_flag(tp, 5705_PLUS)) {
8893                 switch (ofs) {
8894                 case RCVLSC_MODE:
8895                 case DMAC_MODE:
8896                 case MBFREE_MODE:
8897                 case BUFMGR_MODE:
8898                 case MEMARB_MODE:
8899                         /* We can't enable/disable these bits of the
8900                          * 5705/5750, just say success.
8901                          */
8902                         return 0;
8903
8904                 default:
8905                         break;
8906                 }
8907         }
8908
8909         val = tr32(ofs);
8910         val &= ~enable_bit;
8911         tw32_f(ofs, val);
8912
8913         for (i = 0; i < MAX_WAIT_CNT; i++) {
8914                 if (pci_channel_offline(tp->pdev)) {
8915                         dev_err(&tp->pdev->dev,
8916                                 "tg3_stop_block device offline, "
8917                                 "ofs=%lx enable_bit=%x\n",
8918                                 ofs, enable_bit);
8919                         return -ENODEV;
8920                 }
8921
8922                 udelay(100);
8923                 val = tr32(ofs);
8924                 if ((val & enable_bit) == 0)
8925                         break;
8926         }
8927
8928         if (i == MAX_WAIT_CNT && !silent) {
8929                 dev_err(&tp->pdev->dev,
8930                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8931                         ofs, enable_bit);
8932                 return -ENODEV;
8933         }
8934
8935         return 0;
8936 }
8937
/* tp->lock is held.
 * Quiesce the MAC and all DMA/descriptor engines in dependency order
 * (rx path, tx path, host coalescing, buffer/memory managers), then
 * clear every vector's status block.  Individual stop failures are
 * OR-ed into the return value rather than aborting the sequence.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		/* Device is gone from the bus: skip all register pokes and
		 * only update the software shadow copies of the mode regs.
		 */
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	/* Stop the receive MAC first so no new rx traffic enters. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the rx-side descriptor/list/data engines. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Then the tx-side engines and the DMA engines. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll for the transmit MAC to report disabled. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Finally, host coalescing, write DMA and memory managers. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	/* Wipe every vector's status block so stale indices are not
	 * consumed after the hardware is restarted.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
9009
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core clock reset can clear the memory enable bit in
	 * PCI_COMMAND; snapshot the register so tg3_restore_pci_state()
	 * can write it back after the reset.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
9015
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the PCI_COMMAND value saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		/* Conventional PCI: restore the saved cache line size and
		 * latency timer (presumably clobbered by the reset).
		 */
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
9076
9077 static void tg3_override_clk(struct tg3 *tp)
9078 {
9079         u32 val;
9080
9081         switch (tg3_asic_rev(tp)) {
9082         case ASIC_REV_5717:
9083                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9084                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9085                      TG3_CPMU_MAC_ORIDE_ENABLE);
9086                 break;
9087
9088         case ASIC_REV_5719:
9089         case ASIC_REV_5720:
9090                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9091                 break;
9092
9093         default:
9094                 return;
9095         }
9096 }
9097
9098 static void tg3_restore_clk(struct tg3 *tp)
9099 {
9100         u32 val;
9101
9102         switch (tg3_asic_rev(tp)) {
9103         case ASIC_REV_5717:
9104                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9105                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9106                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9107                 break;
9108
9109         case ASIC_REV_5719:
9110         case ASIC_REV_5720:
9111                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9112                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9113                 break;
9114
9115         default:
9116                 return;
9117         }
9118 }
9119
/* tp->lock is held.
 * Full GRC core-clock reset of the chip.  Saves and restores the PCI
 * state around the reset, quiesces irq handlers while PCI memory
 * accesses may fault, waits for bootcode, then re-probes the ASF
 * configuration from NVRAM shadow memory.  Temporarily drops and
 * re-acquires tp->lock (see __releases/__acquires annotations).
 */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Drop the lock while waiting out any irq handlers already
	 * running; synchronize_irq() must not be called under tp->lock.
	 */
	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for the chip's bootcode to signal completion. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9397
9398 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9399 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9400 static void __tg3_set_rx_mode(struct net_device *);
9401
9402 /* tp->lock is held. */
9403 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9404 {
9405         int err, i;
9406
9407         tg3_stop_fw(tp);
9408
9409         tg3_write_sig_pre_reset(tp, kind);
9410
9411         tg3_abort_hw(tp, silent);
9412         err = tg3_chip_reset(tp);
9413
9414         __tg3_set_mac_addr(tp, false);
9415
9416         tg3_write_sig_legacy(tp, kind);
9417         tg3_write_sig_post_reset(tp, kind);
9418
9419         if (tp->hw_stats) {
9420                 /* Save the stats across chip resets... */
9421                 tg3_get_nstats(tp, &tp->net_stats_prev);
9422                 tg3_get_estats(tp, &tp->estats_prev);
9423
9424                 /* And make sure the next sample is new data */
9425                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9426
9427                 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9428                         struct tg3_napi *tnapi = &tp->napi[i];
9429
9430                         tnapi->rx_dropped = 0;
9431                         tnapi->tx_dropped = 0;
9432                 }
9433         }
9434
9435         return err;
9436 }
9437
9438 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9439 {
9440         struct tg3 *tp = netdev_priv(dev);
9441         struct sockaddr *addr = p;
9442         int err = 0;
9443         bool skip_mac_1 = false;
9444
9445         if (!is_valid_ether_addr(addr->sa_data))
9446                 return -EADDRNOTAVAIL;
9447
9448         eth_hw_addr_set(dev, addr->sa_data);
9449
9450         if (!netif_running(dev))
9451                 return 0;
9452
9453         if (tg3_flag(tp, ENABLE_ASF)) {
9454                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9455
9456                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9457                 addr0_low = tr32(MAC_ADDR_0_LOW);
9458                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9459                 addr1_low = tr32(MAC_ADDR_1_LOW);
9460
9461                 /* Skip MAC addr 1 if ASF is using it. */
9462                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9463                     !(addr1_high == 0 && addr1_low == 0))
9464                         skip_mac_1 = true;
9465         }
9466         spin_lock_bh(&tp->lock);
9467         __tg3_set_mac_addr(tp, skip_mac_1);
9468         __tg3_set_rx_mode(dev);
9469         spin_unlock_bh(&tp->lock);
9470
9471         return err;
9472 }
9473
9474 /* tp->lock is held. */
9475 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9476                            dma_addr_t mapping, u32 maxlen_flags,
9477                            u32 nic_addr)
9478 {
9479         tg3_write_mem(tp,
9480                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9481                       ((u64) mapping >> 32));
9482         tg3_write_mem(tp,
9483                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9484                       ((u64) mapping & 0xffffffff));
9485         tg3_write_mem(tp,
9486                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9487                        maxlen_flags);
9488
9489         if (!tg3_flag(tp, 5705_PLUS))
9490                 tg3_write_mem(tp,
9491                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9492                               nic_addr);
9493 }
9494
9495
9496 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9497 {
9498         int i = 0;
9499
9500         if (!tg3_flag(tp, ENABLE_TSS)) {
9501                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9502                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9503                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9504         } else {
9505                 tw32(HOSTCC_TXCOL_TICKS, 0);
9506                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9507                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9508
9509                 for (; i < tp->txq_cnt; i++) {
9510                         u32 reg;
9511
9512                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9513                         tw32(reg, ec->tx_coalesce_usecs);
9514                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9515                         tw32(reg, ec->tx_max_coalesced_frames);
9516                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9517                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9518                 }
9519         }
9520
9521         for (; i < tp->irq_max - 1; i++) {
9522                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9523                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9524                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9525         }
9526 }
9527
9528 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9529 {
9530         int i = 0;
9531         u32 limit = tp->rxq_cnt;
9532
9533         if (!tg3_flag(tp, ENABLE_RSS)) {
9534                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9535                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9536                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9537                 limit--;
9538         } else {
9539                 tw32(HOSTCC_RXCOL_TICKS, 0);
9540                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9541                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9542         }
9543
9544         for (; i < limit; i++) {
9545                 u32 reg;
9546
9547                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9548                 tw32(reg, ec->rx_coalesce_usecs);
9549                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9550                 tw32(reg, ec->rx_max_coalesced_frames);
9551                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9552                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9553         }
9554
9555         for (; i < tp->irq_max - 1; i++) {
9556                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9557                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9558                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9559         }
9560 }
9561
9562 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9563 {
9564         tg3_coal_tx_init(tp, ec);
9565         tg3_coal_rx_init(tp, ec);
9566
9567         if (!tg3_flag(tp, 5705_PLUS)) {
9568                 u32 val = ec->stats_block_coalesce_usecs;
9569
9570                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9571                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9572
9573                 if (!tp->link_up)
9574                         val = 0;
9575
9576                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9577         }
9578 }
9579
9580 /* tp->lock is held. */
9581 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9582 {
9583         u32 txrcb, limit;
9584
9585         /* Disable all transmit rings but the first. */
9586         if (!tg3_flag(tp, 5705_PLUS))
9587                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9588         else if (tg3_flag(tp, 5717_PLUS))
9589                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9590         else if (tg3_flag(tp, 57765_CLASS) ||
9591                  tg3_asic_rev(tp) == ASIC_REV_5762)
9592                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9593         else
9594                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9595
9596         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9597              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9598                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9599                               BDINFO_FLAGS_DISABLED);
9600 }
9601
9602 /* tp->lock is held. */
9603 static void tg3_tx_rcbs_init(struct tg3 *tp)
9604 {
9605         int i = 0;
9606         u32 txrcb = NIC_SRAM_SEND_RCB;
9607
9608         if (tg3_flag(tp, ENABLE_TSS))
9609                 i++;
9610
9611         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9612                 struct tg3_napi *tnapi = &tp->napi[i];
9613
9614                 if (!tnapi->tx_ring)
9615                         continue;
9616
9617                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9618                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9619                                NIC_SRAM_TX_BUFFER_DESC);
9620         }
9621 }
9622
9623 /* tp->lock is held. */
9624 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9625 {
9626         u32 rxrcb, limit;
9627
9628         /* Disable all receive return rings but the first. */
9629         if (tg3_flag(tp, 5717_PLUS))
9630                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9631         else if (!tg3_flag(tp, 5705_PLUS))
9632                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9633         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9634                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9635                  tg3_flag(tp, 57765_CLASS))
9636                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9637         else
9638                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9639
9640         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9641              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9642                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9643                               BDINFO_FLAGS_DISABLED);
9644 }
9645
9646 /* tp->lock is held. */
9647 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9648 {
9649         int i = 0;
9650         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9651
9652         if (tg3_flag(tp, ENABLE_RSS))
9653                 i++;
9654
9655         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9656                 struct tg3_napi *tnapi = &tp->napi[i];
9657
9658                 if (!tnapi->rx_rcb)
9659                         continue;
9660
9661                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9662                                (tp->rx_ret_ring_mask + 1) <<
9663                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9664         }
9665 }
9666
/* tp->lock is held. */
/* Quiesce and reinitialize all rings: disable the extra TX and RX
 * return RCBs, reset every vector's mailboxes and bookkeeping, clear
 * the host status blocks and program their DMA addresses, then rewrite
 * the TX / RX-return ring control blocks.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* TX producer mailboxes on vectors > 0 exist
			 * only when TSS is in use.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the only TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Vectors 1..irq_cnt-1 use per-vector status block address
	 * registers spaced 8 bytes apart from HOSTCC_STATBLCK_RING1.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9737
9738 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9739 {
9740         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9741
9742         if (!tg3_flag(tp, 5750_PLUS) ||
9743             tg3_flag(tp, 5780_CLASS) ||
9744             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9745             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9746             tg3_flag(tp, 57765_PLUS))
9747                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9748         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9749                  tg3_asic_rev(tp) == ASIC_REV_5787)
9750                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9751         else
9752                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9753
9754         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9755         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9756
9757         val = min(nic_rep_thresh, host_rep_thresh);
9758         tw32(RCVBDI_STD_THRESH, val);
9759
9760         if (tg3_flag(tp, 57765_PLUS))
9761                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9762
9763         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9764                 return;
9765
9766         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9767
9768         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9769
9770         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9771         tw32(RCVBDI_JUMBO_THRESH, val);
9772
9773         if (tg3_flag(tp, 57765_PLUS))
9774                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9775 }
9776
9777 static inline u32 calc_crc(unsigned char *buf, int len)
9778 {
9779         u32 reg;
9780         u32 tmp;
9781         int j, k;
9782
9783         reg = 0xffffffff;
9784
9785         for (j = 0; j < len; j++) {
9786                 reg ^= buf[j];
9787
9788                 for (k = 0; k < 8; k++) {
9789                         tmp = reg & 0x01;
9790
9791                         reg >>= 1;
9792
9793                         if (tmp)
9794                                 reg ^= CRC32_POLY_LE;
9795                 }
9796         }
9797
9798         return ~reg;
9799 }
9800
9801 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9802 {
9803         /* accept or reject all multicast frames */
9804         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9805         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9806         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9807         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9808 }
9809
/* Recompute and program the MAC RX mode (promiscuity, VLAN tag
 * stripping) and the multicast/unicast filters from dev->flags and the
 * device's address lists.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with the bits recomputed below
	 * cleared; everything else is carried over unchanged.
	 */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each address into a 128-bit filter: the low 7
		 * bits of the inverted CRC select one bit across the
		 * four 32-bit MAC hash registers (bits 6:5 pick the
		 * register, bits 4:0 the bit within it).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* More unicast addresses than filter slots forces promiscuous
	 * mode; otherwise program each address into a filter entry.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	/* Only touch the RX mode register if the value changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9877
9878 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9879 {
9880         int i;
9881
9882         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9883                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9884 }
9885
9886 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9887 {
9888         int i;
9889
9890         if (!tg3_flag(tp, SUPPORT_MSIX))
9891                 return;
9892
9893         if (tp->rxq_cnt == 1) {
9894                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9895                 return;
9896         }
9897
9898         /* Validate table against current IRQ count */
9899         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9900                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9901                         break;
9902         }
9903
9904         if (i != TG3_RSS_INDIR_TBL_SIZE)
9905                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9906 }
9907
9908 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9909 {
9910         int i = 0;
9911         u32 reg = MAC_RSS_INDIR_TBL_0;
9912
9913         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9914                 u32 val = tp->rss_ind_tbl[i];
9915                 i++;
9916                 for (; i % 8; i++) {
9917                         val <<= 4;
9918                         val |= tp->rss_ind_tbl[i];
9919                 }
9920                 tw32(reg, val);
9921                 reg += 4;
9922         }
9923 }
9924
9925 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9926 {
9927         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9928                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9929         else
9930                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9931 }
9932
9933 /* tp->lock is held. */
9934 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9935 {
9936         u32 val, rdmac_mode;
9937         int i, err, limit;
9938         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9939
9940         tg3_disable_ints(tp);
9941
9942         tg3_stop_fw(tp);
9943
9944         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9945
9946         if (tg3_flag(tp, INIT_COMPLETE))
9947                 tg3_abort_hw(tp, 1);
9948
9949         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9950             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9951                 tg3_phy_pull_config(tp);
9952                 tg3_eee_pull_config(tp, NULL);
9953                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9954         }
9955
9956         /* Enable MAC control of LPI */
9957         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9958                 tg3_setup_eee(tp);
9959
9960         if (reset_phy)
9961                 tg3_phy_reset(tp);
9962
9963         err = tg3_chip_reset(tp);
9964         if (err)
9965                 return err;
9966
9967         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9968
9969         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9970                 val = tr32(TG3_CPMU_CTRL);
9971                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9972                 tw32(TG3_CPMU_CTRL, val);
9973
9974                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9975                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9976                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9977                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9978
9979                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9980                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9981                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9982                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9983
9984                 val = tr32(TG3_CPMU_HST_ACC);
9985                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9986                 val |= CPMU_HST_ACC_MACCLK_6_25;
9987                 tw32(TG3_CPMU_HST_ACC, val);
9988         }
9989
9990         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9991                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9992                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9993                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9994                 tw32(PCIE_PWR_MGMT_THRESH, val);
9995
9996                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9997                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9998
9999                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10000
10001                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10002                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10003         }
10004
10005         if (tg3_flag(tp, L1PLLPD_EN)) {
10006                 u32 grc_mode = tr32(GRC_MODE);
10007
10008                 /* Access the lower 1K of PL PCIE block registers. */
10009                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10010                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10011
10012                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10013                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10014                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10015
10016                 tw32(GRC_MODE, grc_mode);
10017         }
10018
10019         if (tg3_flag(tp, 57765_CLASS)) {
10020                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10021                         u32 grc_mode = tr32(GRC_MODE);
10022
10023                         /* Access the lower 1K of PL PCIE block registers. */
10024                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10025                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10026
10027                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10028                                    TG3_PCIE_PL_LO_PHYCTL5);
10029                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10030                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10031
10032                         tw32(GRC_MODE, grc_mode);
10033                 }
10034
10035                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10036                         u32 grc_mode;
10037
10038                         /* Fix transmit hangs */
10039                         val = tr32(TG3_CPMU_PADRNG_CTL);
10040                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10041                         tw32(TG3_CPMU_PADRNG_CTL, val);
10042
10043                         grc_mode = tr32(GRC_MODE);
10044
10045                         /* Access the lower 1K of DL PCIE block registers. */
10046                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10047                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10048
10049                         val = tr32(TG3_PCIE_TLDLPL_PORT +
10050                                    TG3_PCIE_DL_LO_FTSMAX);
10051                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10052                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10053                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10054
10055                         tw32(GRC_MODE, grc_mode);
10056                 }
10057
10058                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10059                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10060                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10061                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10062         }
10063
10064         /* This works around an issue with Athlon chipsets on
10065          * B3 tigon3 silicon.  This bit has no effect on any
10066          * other revision.  But do not set this on PCI Express
10067          * chips and don't even touch the clocks if the CPMU is present.
10068          */
10069         if (!tg3_flag(tp, CPMU_PRESENT)) {
10070                 if (!tg3_flag(tp, PCI_EXPRESS))
10071                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10072                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10073         }
10074
10075         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10076             tg3_flag(tp, PCIX_MODE)) {
10077                 val = tr32(TG3PCI_PCISTATE);
10078                 val |= PCISTATE_RETRY_SAME_DMA;
10079                 tw32(TG3PCI_PCISTATE, val);
10080         }
10081
10082         if (tg3_flag(tp, ENABLE_APE)) {
10083                 /* Allow reads and writes to the
10084                  * APE register and memory space.
10085                  */
10086                 val = tr32(TG3PCI_PCISTATE);
10087                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10088                        PCISTATE_ALLOW_APE_SHMEM_WR |
10089                        PCISTATE_ALLOW_APE_PSPACE_WR;
10090                 tw32(TG3PCI_PCISTATE, val);
10091         }
10092
10093         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10094                 /* Enable some hw fixes.  */
10095                 val = tr32(TG3PCI_MSI_DATA);
10096                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10097                 tw32(TG3PCI_MSI_DATA, val);
10098         }
10099
10100         /* Descriptor ring init may make accesses to the
10101          * NIC SRAM area to setup the TX descriptors, so we
10102          * can only do this after the hardware has been
10103          * successfully reset.
10104          */
10105         err = tg3_init_rings(tp);
10106         if (err)
10107                 return err;
10108
10109         if (tg3_flag(tp, 57765_PLUS)) {
10110                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10111                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10112                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10113                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10114                 if (!tg3_flag(tp, 57765_CLASS) &&
10115                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10116                     tg3_asic_rev(tp) != ASIC_REV_5762)
10117                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10118                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10119         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10120                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10121                 /* This value is determined during the probe time DMA
10122                  * engine test, tg3_test_dma.
10123                  */
10124                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10125         }
10126
10127         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10128                           GRC_MODE_4X_NIC_SEND_RINGS |
10129                           GRC_MODE_NO_TX_PHDR_CSUM |
10130                           GRC_MODE_NO_RX_PHDR_CSUM);
10131         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10132
10133         /* Pseudo-header checksum is done by hardware logic and not
10134          * the offload processers, so make the chip do the pseudo-
10135          * header checksums on receive.  For transmit it is more
10136          * convenient to do the pseudo-header checksum in software
10137          * as Linux does that on transmit for us in all cases.
10138          */
10139         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10140
10141         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10142         if (tp->rxptpctl)
10143                 tw32(TG3_RX_PTP_CTL,
10144                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10145
10146         if (tg3_flag(tp, PTP_CAPABLE))
10147                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10148
10149         tw32(GRC_MODE, tp->grc_mode | val);
10150
10151         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10152          * south bridge limitation. As a workaround, Driver is setting MRRS
10153          * to 2048 instead of default 4096.
10154          */
10155         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10156             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10157                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10158                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10159         }
10160
10161         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10162         val = tr32(GRC_MISC_CFG);
10163         val &= ~0xff;
10164         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10165         tw32(GRC_MISC_CFG, val);
10166
10167         /* Initialize MBUF/DESC pool. */
10168         if (tg3_flag(tp, 5750_PLUS)) {
10169                 /* Do nothing.  */
10170         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10171                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10172                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10173                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10174                 else
10175                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10176                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10177                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10178         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10179                 int fw_len;
10180
10181                 fw_len = tp->fw_len;
10182                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10183                 tw32(BUFMGR_MB_POOL_ADDR,
10184                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10185                 tw32(BUFMGR_MB_POOL_SIZE,
10186                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10187         }
10188
10189         if (tp->dev->mtu <= ETH_DATA_LEN) {
10190                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10191                      tp->bufmgr_config.mbuf_read_dma_low_water);
10192                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10193                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10194                 tw32(BUFMGR_MB_HIGH_WATER,
10195                      tp->bufmgr_config.mbuf_high_water);
10196         } else {
10197                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10198                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10199                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10200                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10201                 tw32(BUFMGR_MB_HIGH_WATER,
10202                      tp->bufmgr_config.mbuf_high_water_jumbo);
10203         }
10204         tw32(BUFMGR_DMA_LOW_WATER,
10205              tp->bufmgr_config.dma_low_water);
10206         tw32(BUFMGR_DMA_HIGH_WATER,
10207              tp->bufmgr_config.dma_high_water);
10208
10209         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10210         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10211                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10212         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10213             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10214             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10215             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10216                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10217         tw32(BUFMGR_MODE, val);
10218         for (i = 0; i < 2000; i++) {
10219                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10220                         break;
10221                 udelay(10);
10222         }
10223         if (i >= 2000) {
10224                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10225                 return -ENODEV;
10226         }
10227
10228         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10229                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10230
10231         tg3_setup_rxbd_thresholds(tp);
10232
10233         /* Initialize TG3_BDINFO's at:
10234          *  RCVDBDI_STD_BD:     standard eth size rx ring
10235          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10236          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10237          *
10238          * like so:
10239          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10240          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10241          *                              ring attribute flags
10242          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10243          *
10244          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10245          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10246          *
10247          * The size of each ring is fixed in the firmware, but the location is
10248          * configurable.
10249          */
10250         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10251              ((u64) tpr->rx_std_mapping >> 32));
10252         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10253              ((u64) tpr->rx_std_mapping & 0xffffffff));
10254         if (!tg3_flag(tp, 5717_PLUS))
10255                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10256                      NIC_SRAM_RX_BUFFER_DESC);
10257
10258         /* Disable the mini ring */
10259         if (!tg3_flag(tp, 5705_PLUS))
10260                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10261                      BDINFO_FLAGS_DISABLED);
10262
10263         /* Program the jumbo buffer descriptor ring control
10264          * blocks on those devices that have them.
10265          */
10266         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10267             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10268
10269                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10270                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10271                              ((u64) tpr->rx_jmb_mapping >> 32));
10272                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10273                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10274                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10275                               BDINFO_FLAGS_MAXLEN_SHIFT;
10276                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10277                              val | BDINFO_FLAGS_USE_EXT_RECV);
10278                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10279                             tg3_flag(tp, 57765_CLASS) ||
10280                             tg3_asic_rev(tp) == ASIC_REV_5762)
10281                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10282                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10283                 } else {
10284                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10285                              BDINFO_FLAGS_DISABLED);
10286                 }
10287
10288                 if (tg3_flag(tp, 57765_PLUS)) {
10289                         val = TG3_RX_STD_RING_SIZE(tp);
10290                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10291                         val |= (TG3_RX_STD_DMA_SZ << 2);
10292                 } else
10293                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10294         } else
10295                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10296
10297         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10298
10299         tpr->rx_std_prod_idx = tp->rx_pending;
10300         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10301
10302         tpr->rx_jmb_prod_idx =
10303                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10304         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10305
10306         tg3_rings_reset(tp);
10307
10308         /* Initialize MAC address and backoff seed. */
10309         __tg3_set_mac_addr(tp, false);
10310
10311         /* MTU + ethernet header + FCS + optional VLAN tag */
10312         tw32(MAC_RX_MTU_SIZE,
10313              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10314
10315         /* The slot time is changed by tg3_setup_phy if we
10316          * run at gigabit with half duplex.
10317          */
10318         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10319               (6 << TX_LENGTHS_IPG_SHIFT) |
10320               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10321
10322         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10323             tg3_asic_rev(tp) == ASIC_REV_5762)
10324                 val |= tr32(MAC_TX_LENGTHS) &
10325                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10326                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10327
10328         tw32(MAC_TX_LENGTHS, val);
10329
10330         /* Receive rules. */
10331         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10332         tw32(RCVLPC_CONFIG, 0x0181);
10333
10334         /* Calculate RDMAC_MODE setting early, we need it to determine
10335          * the RCVLPC_STATE_ENABLE mask.
10336          */
10337         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10338                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10339                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10340                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10341                       RDMAC_MODE_LNGREAD_ENAB);
10342
10343         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10344                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10345
10346         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10347             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10348             tg3_asic_rev(tp) == ASIC_REV_57780)
10349                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10350                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10351                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10352
10353         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10354             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10355                 if (tg3_flag(tp, TSO_CAPABLE)) {
10356                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10357                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10358                            !tg3_flag(tp, IS_5788)) {
10359                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10360                 }
10361         }
10362
10363         if (tg3_flag(tp, PCI_EXPRESS))
10364                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10365
10366         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10367                 tp->dma_limit = 0;
10368                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10369                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10370                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10371                 }
10372         }
10373
10374         if (tg3_flag(tp, HW_TSO_1) ||
10375             tg3_flag(tp, HW_TSO_2) ||
10376             tg3_flag(tp, HW_TSO_3))
10377                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10378
10379         if (tg3_flag(tp, 57765_PLUS) ||
10380             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10381             tg3_asic_rev(tp) == ASIC_REV_57780)
10382                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10383
10384         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10385             tg3_asic_rev(tp) == ASIC_REV_5762)
10386                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10387
10388         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10389             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10390             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10391             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10392             tg3_flag(tp, 57765_PLUS)) {
10393                 u32 tgtreg;
10394
10395                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10396                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10397                 else
10398                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10399
10400                 val = tr32(tgtreg);
10401                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10402                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10403                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10404                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10405                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10406                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10407                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10408                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10409                 }
10410                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10411         }
10412
10413         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10414             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10415             tg3_asic_rev(tp) == ASIC_REV_5762) {
10416                 u32 tgtreg;
10417
10418                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10419                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10420                 else
10421                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10422
10423                 val = tr32(tgtreg);
10424                 tw32(tgtreg, val |
10425                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10426                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10427         }
10428
10429         /* Receive/send statistics. */
10430         if (tg3_flag(tp, 5750_PLUS)) {
10431                 val = tr32(RCVLPC_STATS_ENABLE);
10432                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10433                 tw32(RCVLPC_STATS_ENABLE, val);
10434         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10435                    tg3_flag(tp, TSO_CAPABLE)) {
10436                 val = tr32(RCVLPC_STATS_ENABLE);
10437                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10438                 tw32(RCVLPC_STATS_ENABLE, val);
10439         } else {
10440                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10441         }
10442         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10443         tw32(SNDDATAI_STATSENAB, 0xffffff);
10444         tw32(SNDDATAI_STATSCTRL,
10445              (SNDDATAI_SCTRL_ENABLE |
10446               SNDDATAI_SCTRL_FASTUPD));
10447
10448         /* Setup host coalescing engine. */
10449         tw32(HOSTCC_MODE, 0);
10450         for (i = 0; i < 2000; i++) {
10451                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10452                         break;
10453                 udelay(10);
10454         }
10455
10456         __tg3_set_coalesce(tp, &tp->coal);
10457
10458         if (!tg3_flag(tp, 5705_PLUS)) {
10459                 /* Status/statistics block address.  See tg3_timer,
10460                  * the tg3_periodic_fetch_stats call there, and
10461                  * tg3_get_stats to see how this works for 5705/5750 chips.
10462                  */
10463                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10464                      ((u64) tp->stats_mapping >> 32));
10465                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10466                      ((u64) tp->stats_mapping & 0xffffffff));
10467                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10468
10469                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10470
10471                 /* Clear statistics and status block memory areas */
10472                 for (i = NIC_SRAM_STATS_BLK;
10473                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10474                      i += sizeof(u32)) {
10475                         tg3_write_mem(tp, i, 0);
10476                         udelay(40);
10477                 }
10478         }
10479
10480         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10481
10482         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10483         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10484         if (!tg3_flag(tp, 5705_PLUS))
10485                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10486
10487         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10488                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10489                 /* reset to prevent losing 1st rx packet intermittently */
10490                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10491                 udelay(10);
10492         }
10493
10494         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10495                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10496                         MAC_MODE_FHDE_ENABLE;
10497         if (tg3_flag(tp, ENABLE_APE))
10498                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10499         if (!tg3_flag(tp, 5705_PLUS) &&
10500             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10501             tg3_asic_rev(tp) != ASIC_REV_5700)
10502                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10503         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10504         udelay(40);
10505
10506         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10507          * If TG3_FLAG_IS_NIC is zero, we should read the
10508          * register to preserve the GPIO settings for LOMs. The GPIOs,
10509          * whether used as inputs or outputs, are set by boot code after
10510          * reset.
10511          */
10512         if (!tg3_flag(tp, IS_NIC)) {
10513                 u32 gpio_mask;
10514
10515                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10516                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10517                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10518
10519                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10520                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10521                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10522
10523                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10524                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10525
10526                 tp->grc_local_ctrl &= ~gpio_mask;
10527                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10528
10529                 /* GPIO1 must be driven high for eeprom write protect */
10530                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10531                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10532                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10533         }
10534         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10535         udelay(100);
10536
10537         if (tg3_flag(tp, USING_MSIX)) {
10538                 val = tr32(MSGINT_MODE);
10539                 val |= MSGINT_MODE_ENABLE;
10540                 if (tp->irq_cnt > 1)
10541                         val |= MSGINT_MODE_MULTIVEC_EN;
10542                 if (!tg3_flag(tp, 1SHOT_MSI))
10543                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10544                 tw32(MSGINT_MODE, val);
10545         }
10546
10547         if (!tg3_flag(tp, 5705_PLUS)) {
10548                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10549                 udelay(40);
10550         }
10551
10552         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10553                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10554                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10555                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10556                WDMAC_MODE_LNGREAD_ENAB);
10557
10558         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10559             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10560                 if (tg3_flag(tp, TSO_CAPABLE) &&
10561                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10562                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10563                         /* nothing */
10564                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10565                            !tg3_flag(tp, IS_5788)) {
10566                         val |= WDMAC_MODE_RX_ACCEL;
10567                 }
10568         }
10569
10570         /* Enable host coalescing bug fix */
10571         if (tg3_flag(tp, 5755_PLUS))
10572                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10573
10574         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10575                 val |= WDMAC_MODE_BURST_ALL_DATA;
10576
10577         tw32_f(WDMAC_MODE, val);
10578         udelay(40);
10579
10580         if (tg3_flag(tp, PCIX_MODE)) {
10581                 u16 pcix_cmd;
10582
10583                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10584                                      &pcix_cmd);
10585                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10586                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10587                         pcix_cmd |= PCI_X_CMD_READ_2K;
10588                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10589                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10590                         pcix_cmd |= PCI_X_CMD_READ_2K;
10591                 }
10592                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10593                                       pcix_cmd);
10594         }
10595
10596         tw32_f(RDMAC_MODE, rdmac_mode);
10597         udelay(40);
10598
10599         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10600             tg3_asic_rev(tp) == ASIC_REV_5720) {
10601                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10602                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10603                                 break;
10604                 }
10605                 if (i < TG3_NUM_RDMA_CHANNELS) {
10606                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10607                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10608                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10609                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10610                 }
10611         }
10612
10613         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10614         if (!tg3_flag(tp, 5705_PLUS))
10615                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10616
10617         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10618                 tw32(SNDDATAC_MODE,
10619                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10620         else
10621                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10622
10623         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10624         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10625         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10626         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10627                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10628         tw32(RCVDBDI_MODE, val);
10629         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10630         if (tg3_flag(tp, HW_TSO_1) ||
10631             tg3_flag(tp, HW_TSO_2) ||
10632             tg3_flag(tp, HW_TSO_3))
10633                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10634         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10635         if (tg3_flag(tp, ENABLE_TSS))
10636                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10637         tw32(SNDBDI_MODE, val);
10638         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10639
10640         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10641                 err = tg3_load_5701_a0_firmware_fix(tp);
10642                 if (err)
10643                         return err;
10644         }
10645
10646         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10647                 /* Ignore any errors for the firmware download. If download
10648                  * fails, the device will operate with EEE disabled
10649                  */
10650                 tg3_load_57766_firmware(tp);
10651         }
10652
10653         if (tg3_flag(tp, TSO_CAPABLE)) {
10654                 err = tg3_load_tso_firmware(tp);
10655                 if (err)
10656                         return err;
10657         }
10658
10659         tp->tx_mode = TX_MODE_ENABLE;
10660
10661         if (tg3_flag(tp, 5755_PLUS) ||
10662             tg3_asic_rev(tp) == ASIC_REV_5906)
10663                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10664
10665         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10666             tg3_asic_rev(tp) == ASIC_REV_5762) {
10667                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10668                 tp->tx_mode &= ~val;
10669                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10670         }
10671
10672         tw32_f(MAC_TX_MODE, tp->tx_mode);
10673         udelay(100);
10674
10675         if (tg3_flag(tp, ENABLE_RSS)) {
10676                 u32 rss_key[10];
10677
10678                 tg3_rss_write_indir_tbl(tp);
10679
10680                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10681
10682                 for (i = 0; i < 10 ; i++)
10683                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10684         }
10685
10686         tp->rx_mode = RX_MODE_ENABLE;
10687         if (tg3_flag(tp, 5755_PLUS))
10688                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10689
10690         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10691                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10692
10693         if (tg3_flag(tp, ENABLE_RSS))
10694                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10695                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10696                                RX_MODE_RSS_IPV6_HASH_EN |
10697                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10698                                RX_MODE_RSS_IPV4_HASH_EN |
10699                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10700
10701         tw32_f(MAC_RX_MODE, tp->rx_mode);
10702         udelay(10);
10703
10704         tw32(MAC_LED_CTRL, tp->led_ctrl);
10705
10706         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10707         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10708                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10709                 udelay(10);
10710         }
10711         tw32_f(MAC_RX_MODE, tp->rx_mode);
10712         udelay(10);
10713
10714         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10715                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10716                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10717                         /* Set drive transmission level to 1.2V  */
10718                         /* only if the signal pre-emphasis bit is not set  */
10719                         val = tr32(MAC_SERDES_CFG);
10720                         val &= 0xfffff000;
10721                         val |= 0x880;
10722                         tw32(MAC_SERDES_CFG, val);
10723                 }
10724                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10725                         tw32(MAC_SERDES_CFG, 0x616000);
10726         }
10727
10728         /* Prevent chip from dropping frames when flow control
10729          * is enabled.
10730          */
10731         if (tg3_flag(tp, 57765_CLASS))
10732                 val = 1;
10733         else
10734                 val = 2;
10735         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10736
10737         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10738             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10739                 /* Use hardware link auto-negotiation */
10740                 tg3_flag_set(tp, HW_AUTONEG);
10741         }
10742
10743         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10744             tg3_asic_rev(tp) == ASIC_REV_5714) {
10745                 u32 tmp;
10746
10747                 tmp = tr32(SERDES_RX_CTRL);
10748                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10749                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10750                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10751                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10752         }
10753
10754         if (!tg3_flag(tp, USE_PHYLIB)) {
10755                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10756                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10757
10758                 err = tg3_setup_phy(tp, false);
10759                 if (err)
10760                         return err;
10761
10762                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10763                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10764                         u32 tmp;
10765
10766                         /* Clear CRC stats. */
10767                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10768                                 tg3_writephy(tp, MII_TG3_TEST1,
10769                                              tmp | MII_TG3_TEST1_CRC_EN);
10770                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10771                         }
10772                 }
10773         }
10774
10775         __tg3_set_rx_mode(tp->dev);
10776
10777         /* Initialize receive rules. */
10778         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10779         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10780         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10781         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10782
10783         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10784                 limit = 8;
10785         else
10786                 limit = 16;
10787         if (tg3_flag(tp, ENABLE_ASF))
10788                 limit -= 4;
10789         switch (limit) {
10790         case 16:
10791                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10792                 fallthrough;
10793         case 15:
10794                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10795                 fallthrough;
10796         case 14:
10797                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10798                 fallthrough;
10799         case 13:
10800                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10801                 fallthrough;
10802         case 12:
10803                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10804                 fallthrough;
10805         case 11:
10806                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10807                 fallthrough;
10808         case 10:
10809                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10810                 fallthrough;
10811         case 9:
10812                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10813                 fallthrough;
10814         case 8:
10815                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10816                 fallthrough;
10817         case 7:
10818                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10819                 fallthrough;
10820         case 6:
10821                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10822                 fallthrough;
10823         case 5:
10824                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10825                 fallthrough;
10826         case 4:
10827                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10828         case 3:
10829                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10830         case 2:
10831         case 1:
10832
10833         default:
10834                 break;
10835         }
10836
10837         if (tg3_flag(tp, ENABLE_APE))
10838                 /* Write our heartbeat update interval to APE. */
10839                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10840                                 APE_HOST_HEARTBEAT_INT_5SEC);
10841
10842         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10843
10844         return 0;
10845 }
10846
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	/* Zero the indirect memory window base before tg3_reset_hw()
	 * starts touching NIC memory.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10865
10866 #ifdef CONFIG_TIGON3_HWMON
10867 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10868 {
10869         u32 off, len = TG3_OCIR_LEN;
10870         int i;
10871
10872         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10873                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10874
10875                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10876                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10877                         memset(ocir, 0, len);
10878         }
10879 }
10880
10881 /* sysfs attributes for hwmon */
10882 static ssize_t tg3_show_temp(struct device *dev,
10883                              struct device_attribute *devattr, char *buf)
10884 {
10885         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10886         struct tg3 *tp = dev_get_drvdata(dev);
10887         u32 temperature;
10888
10889         spin_lock_bh(&tp->lock);
10890         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10891                                 sizeof(temperature));
10892         spin_unlock_bh(&tp->lock);
10893         return sprintf(buf, "%u\n", temperature * 1000);
10894 }
10895
10896
/* Read-only temperature attributes; the final argument is the APE
 * scratchpad offset that tg3_show_temp() passes as attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);
10903
/* Attribute table passed to hwmon_device_register_with_groups() through
 * the tg3_groups array that ATTRIBUTE_GROUPS() generates.
 */
static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
10911
10912 static void tg3_hwmon_close(struct tg3 *tp)
10913 {
10914         if (tp->hwmon_dev) {
10915                 hwmon_device_unregister(tp->hwmon_dev);
10916                 tp->hwmon_dev = NULL;
10917         }
10918 }
10919
10920 static void tg3_hwmon_open(struct tg3 *tp)
10921 {
10922         int i;
10923         u32 size = 0;
10924         struct pci_dev *pdev = tp->pdev;
10925         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10926
10927         tg3_sd_scan_scratchpad(tp, ocirs);
10928
10929         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10930                 if (!ocirs[i].src_data_length)
10931                         continue;
10932
10933                 size += ocirs[i].src_hdr_length;
10934                 size += ocirs[i].src_data_length;
10935         }
10936
10937         if (!size)
10938                 return;
10939
10940         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10941                                                           tp, tg3_groups);
10942         if (IS_ERR(tp->hwmon_dev)) {
10943                 tp->hwmon_dev = NULL;
10944                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10945         }
10946 }
10947 #else
/* No-op stubs when CONFIG_TIGON3_HWMON is not enabled */
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
10950 #endif /* CONFIG_TIGON3_HWMON */
10951
10952
/* Accumulate the 32-bit hardware counter at REG into the 64-bit statistic
 * PSTAT: add into the low word and carry into the high word when the
 * addition wraps (detected by the sum being smaller than the addend).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10959
/* Fold the MAC's 32-bit TX/RX hardware statistics counters into the 64-bit
 * counters in tp->hw_stats.  Called from tg3_timer() in the once-per-second
 * section when the 5705_PLUS flag is set.  Does nothing while the link
 * is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA workaround teardown: once the total TX packet count
	 * exceeds the number of RDMA channels, clear the workaround bit that
	 * tg3_reset_hw() set and drop the flag.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On chips without a usable RCVLPC_IN_DISCARDS_CNT, derive discards
	 * from the HOSTCC mbuf low-watermark attention bit instead, clearing
	 * the (write-to-clear) bit after reading it.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
11025
11026 static void tg3_chk_missed_msi(struct tg3 *tp)
11027 {
11028         u32 i;
11029
11030         for (i = 0; i < tp->irq_cnt; i++) {
11031                 struct tg3_napi *tnapi = &tp->napi[i];
11032
11033                 if (tg3_has_work(tnapi)) {
11034                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11035                             tnapi->last_tx_cons == tnapi->tx_cons) {
11036                                 if (tnapi->chk_msi_cnt < 1) {
11037                                         tnapi->chk_msi_cnt++;
11038                                         return;
11039                                 }
11040                                 tg3_msi(0, tnapi);
11041                         }
11042                 }
11043                 tnapi->chk_msi_cnt = 0;
11044                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11045                 tnapi->last_tx_cons = tnapi->tx_cons;
11046         }
11047 }
11048
/* Periodic driver timer, rescheduled every tp->timer_offset jiffies.
 * Handles missed-MSI workarounds, non-tagged-status interrupt races,
 * once-per-second link polling and statistics, and the ASF/APE
 * firmware heartbeats.
 */
static void tg3_timer(struct timer_list *t)
{
	struct tg3 *tp = from_timer(tp, t, timer);

	spin_lock(&tp->lock);

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		/* A reset or IRQ sync is in flight; do nothing this
		 * tick, just reschedule.
		 */
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly -
			 * schedule a full chip reset.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode bits to
					 * force the SERDES state machine
					 * to resynchronize.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	/* Update the APE heartbeat every 5 seconds.*/
	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
11192
11193 static void tg3_timer_init(struct tg3 *tp)
11194 {
11195         if (tg3_flag(tp, TAGGED_STATUS) &&
11196             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11197             !tg3_flag(tp, 57765_CLASS))
11198                 tp->timer_offset = HZ;
11199         else
11200                 tp->timer_offset = HZ / 10;
11201
11202         BUG_ON(tp->timer_offset > HZ);
11203
11204         tp->timer_multiplier = (HZ / tp->timer_offset);
11205         tp->asf_multiplier = (HZ / tp->timer_offset) *
11206                              TG3_FW_UPDATE_FREQ_SEC;
11207
11208         timer_setup(&tp->timer, tg3_timer, 0);
11209 }
11210
11211 static void tg3_timer_start(struct tg3 *tp)
11212 {
11213         tp->asf_counter   = tp->asf_multiplier;
11214         tp->timer_counter = tp->timer_multiplier;
11215
11216         tp->timer.expires = jiffies + tp->timer_offset;
11217         add_timer(&tp->timer);
11218 }
11219
/* Disarm the periodic timer, waiting for a concurrently executing
 * tg3_timer() on another CPU to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
11224
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On init failure the device is halted and closed; dev_close() can
 * sleep and takes other locks, so tp->lock is dropped around the
 * teardown and reacquired before returning (hence the sparse
 * __releases/__acquires annotations).
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		/* Re-enable NAPI so dev_close() can quiesce it normally. */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
11248
/* Process-context reset worker (scheduled via tg3_reset_task_schedule()).
 * Stops the interface, fully halts and re-initializes the chip, then
 * restarts traffic - all under rtnl_lock with tp->lock taken around the
 * hardware accesses.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
	    tp->pdev->error_state != pci_channel_io_normal) {
		/* Device is down or PCI error recovery owns it - bail. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	/* These may sleep, so they run without tp->lock held. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX problem triggered this reset; switch to flushed
		 * mailbox writes in case PCI write posting/reordering
		 * was the cause.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);
	tg3_full_unlock(tp);
	tg3_phy_start(tp);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}
11301
11302 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11303 {
11304         irq_handler_t fn;
11305         unsigned long flags;
11306         char *name;
11307         struct tg3_napi *tnapi = &tp->napi[irq_num];
11308
11309         if (tp->irq_cnt == 1)
11310                 name = tp->dev->name;
11311         else {
11312                 name = &tnapi->irq_lbl[0];
11313                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11314                         snprintf(name, IFNAMSIZ,
11315                                  "%s-txrx-%d", tp->dev->name, irq_num);
11316                 else if (tnapi->tx_buffers)
11317                         snprintf(name, IFNAMSIZ,
11318                                  "%s-tx-%d", tp->dev->name, irq_num);
11319                 else if (tnapi->rx_rcb)
11320                         snprintf(name, IFNAMSIZ,
11321                                  "%s-rx-%d", tp->dev->name, irq_num);
11322                 else
11323                         snprintf(name, IFNAMSIZ,
11324                                  "%s-%d", tp->dev->name, irq_num);
11325                 name[IFNAMSIZ-1] = 0;
11326         }
11327
11328         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11329                 fn = tg3_msi;
11330                 if (tg3_flag(tp, 1SHOT_MSI))
11331                         fn = tg3_msi_1shot;
11332                 flags = 0;
11333         } else {
11334                 fn = tg3_interrupt;
11335                 if (tg3_flag(tp, TAGGED_STATUS))
11336                         fn = tg3_interrupt_tagged;
11337                 flags = IRQF_SHARED;
11338         }
11339
11340         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11341 }
11342
/* Verify that the device can actually deliver an interrupt: swap in a
 * minimal test ISR, force an immediate coalescing event, and poll for
 * evidence of delivery.  The normal handler is reinstalled before
 * returning.  Returns 0 on success, -EIO if no interrupt arrived,
 * -ENODEV if the device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force a coalescing event now so an interrupt should fire. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A nonzero mailbox value or the masked-PCI-int bit
		 * being set is taken as proof of delivery.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the production handler regardless of the outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11416
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error (including a failed chip
 * re-init after the fallback) is returned to the caller.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (SERR re-enabled). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11477
/* Load the firmware image named by tp->fw_needed and sanity-check the
 * length field in its header.  On success tp->fw holds the blob and
 * tp->fw_needed is cleared; returns 0, -ENOENT (load failed) or
 * -EINVAL (bogus header).
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
11508
11509 static u32 tg3_irq_count(struct tg3 *tp)
11510 {
11511         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11512
11513         if (irq_cnt > 1) {
11514                 /* We want as many rx rings enabled as there are cpus.
11515                  * In multiqueue MSI-X mode, the first MSI-X vector
11516                  * only deals with link interrupts, etc, so we add
11517                  * one to the number of vectors we are requesting.
11518                  */
11519                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11520         }
11521
11522         return irq_cnt;
11523 }
11524
/* Try to put the device into MSI-X mode, sizing the vector count from
 * the requested/available RX and TX queue counts.  On success the
 * per-NAPI irq_vec fields are filled in and true is returned; false
 * means the caller should fall back to MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		/* Fewer vectors granted than requested: shrink the queue
		 * counts to fit (vector 0 carries link/misc only).
		 */
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11583
/* Select the interrupt mode in preference order MSI-X > MSI > INTx,
 * program the chip's message-interrupt mode register accordingly, and
 * collapse to a single queue/vector when only one IRQ is available.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI or legacy INTx: the single vector comes from
		 * the PCI core.
		 */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11622
11623 static void tg3_ints_fini(struct tg3 *tp)
11624 {
11625         if (tg3_flag(tp, USING_MSIX))
11626                 pci_disable_msix(tp->pdev);
11627         else if (tg3_flag(tp, USING_MSI))
11628                 pci_disable_msi(tp->pdev);
11629         tg3_flag_clear(tp, USING_MSI);
11630         tg3_flag_clear(tp, USING_MSIX);
11631         tg3_flag_clear(tp, ENABLE_RSS);
11632         tg3_flag_clear(tp, ENABLE_TSS);
11633 }
11634
/* Bring the device fully up: configure interrupts, allocate rings and
 * NAPI contexts, request IRQs, initialize the hardware, optionally
 * verify MSI delivery, then start the queues and periodic timer.
 * Returns 0 on success or a negative errno with everything unwound
 * via the goto ladder at the bottom.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors requested so far. */
			for (i--; i >= 0; i--) {
				struct tg3_napi *tnapi = &tp->napi[i];

				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI failed and the INTx fallback failed too. */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11749
/* Counterpart of tg3_start(): halt the chip and release rings, IRQs,
 * NAPI contexts and DMA memory, leaving the device fully quiescent.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	/* Ensure no reset worker races with the teardown below. */
	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11784
/* ndo_open: load any firmware the chip needs, power the device up and
 * bring the interface online via tg3_start().
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* 57766: a firmware failure only costs EEE; the
			 * open itself still proceeds.
			 */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0: firmware is mandatory - fail the open. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips: firmware failure only disables TSO. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		/* Bring-up failed: drop auxiliary power and put the
		 * device back into D3hot.
		 */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
11841
11842 static int tg3_close(struct net_device *dev)
11843 {
11844         struct tg3 *tp = netdev_priv(dev);
11845
11846         if (tp->pcierr_recovery) {
11847                 netdev_err(dev, "Failed to close device. PCI error recovery "
11848                            "in progress\n");
11849                 return -EAGAIN;
11850         }
11851
11852         tg3_stop(tp);
11853
11854         if (pci_device_is_present(tp->pdev)) {
11855                 tg3_power_down_prepare(tp);
11856
11857                 tg3_carrier_off(tp);
11858         }
11859         return 0;
11860 }
11861
11862 static inline u64 get_stat64(tg3_stat64_t *val)
11863 {
11864        return ((u64)val->high << 32) | ((u64)val->low);
11865 }
11866
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the value comes from the PHY counter registers and is
 * accumulated in tp->phy_crc_errors; all other chips report it via
 * the rx_fcs_errors hardware statistics counter.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then read the counter.
			 * If the TEST1 read fails, count nothing this
			 * round rather than using stale data.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11890
/* Accumulate one ethtool statistic: the snapshot saved before the last
 * reset (old_estats) plus the live hardware counter.  Relies on the
 * 'estats', 'old_estats' and 'hw_stats' locals of the caller.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
11894
11895 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11896 {
11897         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11898         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11899
11900         ESTAT_ADD(rx_octets);
11901         ESTAT_ADD(rx_fragments);
11902         ESTAT_ADD(rx_ucast_packets);
11903         ESTAT_ADD(rx_mcast_packets);
11904         ESTAT_ADD(rx_bcast_packets);
11905         ESTAT_ADD(rx_fcs_errors);
11906         ESTAT_ADD(rx_align_errors);
11907         ESTAT_ADD(rx_xon_pause_rcvd);
11908         ESTAT_ADD(rx_xoff_pause_rcvd);
11909         ESTAT_ADD(rx_mac_ctrl_rcvd);
11910         ESTAT_ADD(rx_xoff_entered);
11911         ESTAT_ADD(rx_frame_too_long_errors);
11912         ESTAT_ADD(rx_jabbers);
11913         ESTAT_ADD(rx_undersize_packets);
11914         ESTAT_ADD(rx_in_length_errors);
11915         ESTAT_ADD(rx_out_length_errors);
11916         ESTAT_ADD(rx_64_or_less_octet_packets);
11917         ESTAT_ADD(rx_65_to_127_octet_packets);
11918         ESTAT_ADD(rx_128_to_255_octet_packets);
11919         ESTAT_ADD(rx_256_to_511_octet_packets);
11920         ESTAT_ADD(rx_512_to_1023_octet_packets);
11921         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11922         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11923         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11924         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11925         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11926
11927         ESTAT_ADD(tx_octets);
11928         ESTAT_ADD(tx_collisions);
11929         ESTAT_ADD(tx_xon_sent);
11930         ESTAT_ADD(tx_xoff_sent);
11931         ESTAT_ADD(tx_flow_control);
11932         ESTAT_ADD(tx_mac_errors);
11933         ESTAT_ADD(tx_single_collisions);
11934         ESTAT_ADD(tx_mult_collisions);
11935         ESTAT_ADD(tx_deferred);
11936         ESTAT_ADD(tx_excessive_collisions);
11937         ESTAT_ADD(tx_late_collisions);
11938         ESTAT_ADD(tx_collide_2times);
11939         ESTAT_ADD(tx_collide_3times);
11940         ESTAT_ADD(tx_collide_4times);
11941         ESTAT_ADD(tx_collide_5times);
11942         ESTAT_ADD(tx_collide_6times);
11943         ESTAT_ADD(tx_collide_7times);
11944         ESTAT_ADD(tx_collide_8times);
11945         ESTAT_ADD(tx_collide_9times);
11946         ESTAT_ADD(tx_collide_10times);
11947         ESTAT_ADD(tx_collide_11times);
11948         ESTAT_ADD(tx_collide_12times);
11949         ESTAT_ADD(tx_collide_13times);
11950         ESTAT_ADD(tx_collide_14times);
11951         ESTAT_ADD(tx_collide_15times);
11952         ESTAT_ADD(tx_ucast_packets);
11953         ESTAT_ADD(tx_mcast_packets);
11954         ESTAT_ADD(tx_bcast_packets);
11955         ESTAT_ADD(tx_carrier_sense_errors);
11956         ESTAT_ADD(tx_discards);
11957         ESTAT_ADD(tx_errors);
11958
11959         ESTAT_ADD(dma_writeq_full);
11960         ESTAT_ADD(dma_write_prioq_full);
11961         ESTAT_ADD(rxbds_empty);
11962         ESTAT_ADD(rx_discards);
11963         ESTAT_ADD(rx_errors);
11964         ESTAT_ADD(rx_threshold_hit);
11965
11966         ESTAT_ADD(dma_readq_full);
11967         ESTAT_ADD(dma_read_prioq_full);
11968         ESTAT_ADD(tx_comp_queue_full);
11969
11970         ESTAT_ADD(ring_set_send_prod_index);
11971         ESTAT_ADD(ring_status_update);
11972         ESTAT_ADD(nic_irqs);
11973         ESTAT_ADD(nic_avoided_irqs);
11974         ESTAT_ADD(nic_tx_threshold_hit);
11975
11976         ESTAT_ADD(mbuf_lwm_thresh_hit);
11977 }
11978
/* Fill the standard rtnl_link_stats64 block.  Each field is the current
 * hardware counter (which resets with the chip) added on top of the
 * value saved in tp->net_stats_prev across the last reset; rx/tx drops
 * additionally fold in the per-queue software counters.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
	int i;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may need a PHY counter read on some chips; the
	 * helper picks the right source.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Aggregate per-queue counters. The per-queue counters are updated
	 * by a single writer, race-free. The result computed by this loop
	 * might not be 100% accurate (counters can be updated in the middle of
	 * the loop) but the next tg3_get_nstats() will recompute the current
	 * value so it is acceptable.
	 *
	 * Note that these counters wrap around at 4G on 32bit machines.
	 */
	rx_dropped = (unsigned long)(old_stats->rx_dropped);
	tx_dropped = (unsigned long)(old_stats->tx_dropped);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		rx_dropped += tnapi->rx_dropped;
		tx_dropped += tnapi->tx_dropped;
	}

	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
}
12053
12054 static int tg3_get_regs_len(struct net_device *dev)
12055 {
12056         return TG3_REG_BLK_SIZE;
12057 }
12058
12059 static void tg3_get_regs(struct net_device *dev,
12060                 struct ethtool_regs *regs, void *_p)
12061 {
12062         struct tg3 *tp = netdev_priv(dev);
12063
12064         regs->version = 0;
12065
12066         memset(_p, 0, TG3_REG_BLK_SIZE);
12067
12068         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12069                 return;
12070
12071         tg3_full_lock(tp, 0);
12072
12073         tg3_dump_legacy_regs(tp, (u32 *)_p);
12074
12075         tg3_full_unlock(tp);
12076 }
12077
12078 static int tg3_get_eeprom_len(struct net_device *dev)
12079 {
12080         struct tg3 *tp = netdev_priv(dev);
12081
12082         return tp->nvram_size;
12083 }
12084
/* ethtool get_eeprom: read an arbitrary byte range out of NVRAM.
 *
 * NVRAM is addressable only in big-endian 4-byte words, so unaligned
 * leading/trailing bytes are extracted from whole-word reads.  Chip
 * clock and CPMU link-power-management modes are overridden for the
 * duration of the (potentially slow) access and restored on every exit
 * path via the eeprom_done label.  eeprom->len is updated to the number
 * of bytes actually placed in *data, even on partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* becomes the count of bytes actually read */

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* NOTE(review): when i > 0 this backs off one
			 * extra word, so the reported length is a
			 * conservative count of the bytes copied.
			 */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* Long dumps can take a while: stay preemption-friendly
		 * and let a pending signal abort the read.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12175
12176 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12177 {
12178         struct tg3 *tp = netdev_priv(dev);
12179         int ret;
12180         u32 offset, len, b_offset, odd_len;
12181         u8 *buf;
12182         __be32 start = 0, end;
12183
12184         if (tg3_flag(tp, NO_NVRAM) ||
12185             eeprom->magic != TG3_EEPROM_MAGIC)
12186                 return -EINVAL;
12187
12188         offset = eeprom->offset;
12189         len = eeprom->len;
12190
12191         if ((b_offset = (offset & 3))) {
12192                 /* adjustments to start on required 4 byte boundary */
12193                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12194                 if (ret)
12195                         return ret;
12196                 len += b_offset;
12197                 offset &= ~3;
12198                 if (len < 4)
12199                         len = 4;
12200         }
12201
12202         odd_len = 0;
12203         if (len & 3) {
12204                 /* adjustments to end on required 4 byte boundary */
12205                 odd_len = 1;
12206                 len = (len + 3) & ~3;
12207                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12208                 if (ret)
12209                         return ret;
12210         }
12211
12212         buf = data;
12213         if (b_offset || odd_len) {
12214                 buf = kmalloc(len, GFP_KERNEL);
12215                 if (!buf)
12216                         return -ENOMEM;
12217                 if (b_offset)
12218                         memcpy(buf, &start, 4);
12219                 if (odd_len)
12220                         memcpy(buf+len-4, &end, 4);
12221                 memcpy(buf + b_offset, data, eeprom->len);
12222         }
12223
12224         ret = tg3_nvram_write_block(tp, offset, len, buf);
12225
12226         if (buf != data)
12227                 kfree(buf);
12228
12229         return ret;
12230 }
12231
12232 static int tg3_get_link_ksettings(struct net_device *dev,
12233                                   struct ethtool_link_ksettings *cmd)
12234 {
12235         struct tg3 *tp = netdev_priv(dev);
12236         u32 supported, advertising;
12237
12238         if (tg3_flag(tp, USE_PHYLIB)) {
12239                 struct phy_device *phydev;
12240                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12241                         return -EAGAIN;
12242                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12243                 phy_ethtool_ksettings_get(phydev, cmd);
12244
12245                 return 0;
12246         }
12247
12248         supported = (SUPPORTED_Autoneg);
12249
12250         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12251                 supported |= (SUPPORTED_1000baseT_Half |
12252                               SUPPORTED_1000baseT_Full);
12253
12254         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12255                 supported |= (SUPPORTED_100baseT_Half |
12256                               SUPPORTED_100baseT_Full |
12257                               SUPPORTED_10baseT_Half |
12258                               SUPPORTED_10baseT_Full |
12259                               SUPPORTED_TP);
12260                 cmd->base.port = PORT_TP;
12261         } else {
12262                 supported |= SUPPORTED_FIBRE;
12263                 cmd->base.port = PORT_FIBRE;
12264         }
12265         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12266                                                 supported);
12267
12268         advertising = tp->link_config.advertising;
12269         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12270                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12271                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12272                                 advertising |= ADVERTISED_Pause;
12273                         } else {
12274                                 advertising |= ADVERTISED_Pause |
12275                                         ADVERTISED_Asym_Pause;
12276                         }
12277                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12278                         advertising |= ADVERTISED_Asym_Pause;
12279                 }
12280         }
12281         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12282                                                 advertising);
12283
12284         if (netif_running(dev) && tp->link_up) {
12285                 cmd->base.speed = tp->link_config.active_speed;
12286                 cmd->base.duplex = tp->link_config.active_duplex;
12287                 ethtool_convert_legacy_u32_to_link_mode(
12288                         cmd->link_modes.lp_advertising,
12289                         tp->link_config.rmt_adv);
12290
12291                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12292                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12293                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12294                         else
12295                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12296                 }
12297         } else {
12298                 cmd->base.speed = SPEED_UNKNOWN;
12299                 cmd->base.duplex = DUPLEX_UNKNOWN;
12300                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12301         }
12302         cmd->base.phy_address = tp->phy_addr;
12303         cmd->base.autoneg = tp->link_config.autoneg;
12304         return 0;
12305 }
12306
/* ethtool set_link_ksettings: validate and apply a new link
 * configuration (autoneg on with an advertisement mask, or a forced
 * speed/duplex), then re-run PHY setup if the interface is up.  With
 * phylib attached, the request is delegated to the PHY layer.
 */
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link must name a definite duplex. */
	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise,
		 * then reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for storage; the
		 * port/pause bits are reconstructed elsewhere.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links can only be forced to 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12402
12403 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12404 {
12405         struct tg3 *tp = netdev_priv(dev);
12406
12407         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12408         strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12409         strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12410 }
12411
12412 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12413 {
12414         struct tg3 *tp = netdev_priv(dev);
12415
12416         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12417                 wol->supported = WAKE_MAGIC;
12418         else
12419                 wol->supported = 0;
12420         wol->wolopts = 0;
12421         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12422                 wol->wolopts = WAKE_MAGIC;
12423         memset(&wol->sopass, 0, sizeof(wol->sopass));
12424 }
12425
12426 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12427 {
12428         struct tg3 *tp = netdev_priv(dev);
12429         struct device *dp = &tp->pdev->dev;
12430
12431         if (wol->wolopts & ~WAKE_MAGIC)
12432                 return -EINVAL;
12433         if ((wol->wolopts & WAKE_MAGIC) &&
12434             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12435                 return -EINVAL;
12436
12437         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12438
12439         if (device_may_wakeup(dp))
12440                 tg3_flag_set(tp, WOL_ENABLE);
12441         else
12442                 tg3_flag_clear(tp, WOL_ENABLE);
12443
12444         return 0;
12445 }
12446
12447 static u32 tg3_get_msglevel(struct net_device *dev)
12448 {
12449         struct tg3 *tp = netdev_priv(dev);
12450         return tp->msg_enable;
12451 }
12452
12453 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12454 {
12455         struct tg3 *tp = netdev_priv(dev);
12456         tp->msg_enable = value;
12457 }
12458
/* ethtool nway_reset: restart link autonegotiation.
 *
 * Fails when the interface is down or the PHY is serdes.  With phylib
 * attached the restart is delegated to phy_start_aneg(); otherwise BMCR
 * is manipulated directly under tp->lock, and the restart is only
 * issued if autoneg is enabled (or parallel detect is active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): the result of this first read is
		 * discarded — presumably it flushes a stale/latched
		 * BMCR value before the read that is actually checked.
		 * This double read is long-standing behavior; keep it.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12494
12495 static void tg3_get_ringparam(struct net_device *dev,
12496                               struct ethtool_ringparam *ering,
12497                               struct kernel_ethtool_ringparam *kernel_ering,
12498                               struct netlink_ext_ack *extack)
12499 {
12500         struct tg3 *tp = netdev_priv(dev);
12501
12502         ering->rx_max_pending = tp->rx_std_ring_mask;
12503         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12504                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12505         else
12506                 ering->rx_jumbo_max_pending = 0;
12507
12508         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12509
12510         ering->rx_pending = tp->rx_pending;
12511         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12512                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12513         else
12514                 ering->rx_jumbo_pending = 0;
12515
12516         ering->tx_pending = tp->napi[0].tx_pending;
12517 }
12518
/* ethtool set_ringparam: resize the RX standard/jumbo and TX rings.
 *
 * If the interface is running, traffic is stopped, the chip is halted,
 * the new sizes are applied and the hardware is restarted; a PHY reset
 * is forced on 5717/5719/5720 to avoid a known PHY lock-up across the
 * restart.
 */
static int tg3_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering,
			     struct kernel_ethtool_ringparam *kernel_ering,
			     struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	/* TX must leave headroom for a maximally-fragmented skb (three
	 * times that on chips with the TSO bug workaround).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues share the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Only restart the PHY if we stopped it and the HW came back. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12576
12577 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12578 {
12579         struct tg3 *tp = netdev_priv(dev);
12580
12581         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12582
12583         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12584                 epause->rx_pause = 1;
12585         else
12586                 epause->rx_pause = 0;
12587
12588         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12589                 epause->tx_pause = 1;
12590         else
12591                 epause->tx_pause = 0;
12592 }
12593
/* ethtool set_pauseparam: apply a new flow-control configuration.
 *
 * With phylib attached, the pause advertisement is pushed into the PHY
 * and (when the PHY is autonegotiating) the final flow-control setup is
 * left to tg3_adjust_link() after renegotiation.  Without phylib, the
 * chip is halted and restarted with the new settings, forcing a PHY
 * reset on 5717/5719/5720 to avoid a known PHY lock-up.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		if (!phy_validate_pause(phydev, epause))
			return -EINVAL;

		/* Rebuild the flowctrl bits from scratch. */
		tp->link_config.flowctrl = 0;
		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			}
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		}

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			if (phydev->autoneg) {
				/* phy_set_asym_pause() will
				 * renegotiate the link to inform our
				 * link partner of our flow control
				 * settings, even if the flow control
				 * is forced.  Let tg3_adjust_link()
				 * do the final flow control setup.
				 */
				return 0;
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12686
12687 static int tg3_get_sset_count(struct net_device *dev, int sset)
12688 {
12689         switch (sset) {
12690         case ETH_SS_TEST:
12691                 return TG3_NUM_TEST;
12692         case ETH_SS_STATS:
12693                 return TG3_NUM_STATS;
12694         default:
12695                 return -EOPNOTSUPP;
12696         }
12697 }
12698
12699 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12700                          u32 *rules __always_unused)
12701 {
12702         struct tg3 *tp = netdev_priv(dev);
12703
12704         if (!tg3_flag(tp, SUPPORT_MSIX))
12705                 return -EOPNOTSUPP;
12706
12707         switch (info->cmd) {
12708         case ETHTOOL_GRXRINGS:
12709                 if (netif_running(tp->dev))
12710                         info->data = tp->rxq_cnt;
12711                 else {
12712                         info->data = num_online_cpus();
12713                         if (info->data > TG3_RSS_MAX_NUM_QS)
12714                                 info->data = TG3_RSS_MAX_NUM_QS;
12715                 }
12716
12717                 return 0;
12718
12719         default:
12720                 return -EOPNOTSUPP;
12721         }
12722 }
12723
12724 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12725 {
12726         u32 size = 0;
12727         struct tg3 *tp = netdev_priv(dev);
12728
12729         if (tg3_flag(tp, SUPPORT_MSIX))
12730                 size = TG3_RSS_INDIR_TBL_SIZE;
12731
12732         return size;
12733 }
12734
12735 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12736 {
12737         struct tg3 *tp = netdev_priv(dev);
12738         int i;
12739
12740         rxfh->hfunc = ETH_RSS_HASH_TOP;
12741         if (!rxfh->indir)
12742                 return 0;
12743
12744         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12745                 rxfh->indir[i] = tp->rss_ind_tbl[i];
12746
12747         return 0;
12748 }
12749
12750 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12751                         struct netlink_ext_ack *extack)
12752 {
12753         struct tg3 *tp = netdev_priv(dev);
12754         size_t i;
12755
12756         /* We require at least one supported parameter to be changed and no
12757          * change in any of the unsupported parameters
12758          */
12759         if (rxfh->key ||
12760             (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12761              rxfh->hfunc != ETH_RSS_HASH_TOP))
12762                 return -EOPNOTSUPP;
12763
12764         if (!rxfh->indir)
12765                 return 0;
12766
12767         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12768                 tp->rss_ind_tbl[i] = rxfh->indir[i];
12769
12770         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12771                 return 0;
12772
12773         /* It is legal to write the indirection
12774          * table while the device is running.
12775          */
12776         tg3_full_lock(tp, 0);
12777         tg3_rss_write_indir_tbl(tp);
12778         tg3_full_unlock(tp);
12779
12780         return 0;
12781 }
12782
12783 static void tg3_get_channels(struct net_device *dev,
12784                              struct ethtool_channels *channel)
12785 {
12786         struct tg3 *tp = netdev_priv(dev);
12787         u32 deflt_qs = netif_get_num_default_rss_queues();
12788
12789         channel->max_rx = tp->rxq_max;
12790         channel->max_tx = tp->txq_max;
12791
12792         if (netif_running(dev)) {
12793                 channel->rx_count = tp->rxq_cnt;
12794                 channel->tx_count = tp->txq_cnt;
12795         } else {
12796                 if (tp->rxq_req)
12797                         channel->rx_count = tp->rxq_req;
12798                 else
12799                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12800
12801                 if (tp->txq_req)
12802                         channel->tx_count = tp->txq_req;
12803                 else
12804                         channel->tx_count = min(deflt_qs, tp->txq_max);
12805         }
12806 }
12807
12808 static int tg3_set_channels(struct net_device *dev,
12809                             struct ethtool_channels *channel)
12810 {
12811         struct tg3 *tp = netdev_priv(dev);
12812
12813         if (!tg3_flag(tp, SUPPORT_MSIX))
12814                 return -EOPNOTSUPP;
12815
12816         if (channel->rx_count > tp->rxq_max ||
12817             channel->tx_count > tp->txq_max)
12818                 return -EINVAL;
12819
12820         tp->rxq_req = channel->rx_count;
12821         tp->txq_req = channel->tx_count;
12822
12823         if (!netif_running(dev))
12824                 return 0;
12825
12826         tg3_stop(tp);
12827
12828         tg3_carrier_off(tp);
12829
12830         tg3_start(tp, true, false, false);
12831
12832         return 0;
12833 }
12834
12835 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12836 {
12837         switch (stringset) {
12838         case ETH_SS_STATS:
12839                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12840                 break;
12841         case ETH_SS_TEST:
12842                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12843                 break;
12844         default:
12845                 WARN_ON(1);     /* we need a WARN() */
12846                 break;
12847         }
12848 }
12849
12850 static int tg3_set_phys_id(struct net_device *dev,
12851                             enum ethtool_phys_id_state state)
12852 {
12853         struct tg3 *tp = netdev_priv(dev);
12854
12855         switch (state) {
12856         case ETHTOOL_ID_ACTIVE:
12857                 return 1;       /* cycle on/off once per second */
12858
12859         case ETHTOOL_ID_ON:
12860                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12861                      LED_CTRL_1000MBPS_ON |
12862                      LED_CTRL_100MBPS_ON |
12863                      LED_CTRL_10MBPS_ON |
12864                      LED_CTRL_TRAFFIC_OVERRIDE |
12865                      LED_CTRL_TRAFFIC_BLINK |
12866                      LED_CTRL_TRAFFIC_LED);
12867                 break;
12868
12869         case ETHTOOL_ID_OFF:
12870                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12871                      LED_CTRL_TRAFFIC_OVERRIDE);
12872                 break;
12873
12874         case ETHTOOL_ID_INACTIVE:
12875                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12876                 break;
12877         }
12878
12879         return 0;
12880 }
12881
12882 static void tg3_get_ethtool_stats(struct net_device *dev,
12883                                    struct ethtool_stats *estats, u64 *tmp_stats)
12884 {
12885         struct tg3 *tp = netdev_priv(dev);
12886
12887         if (tp->hw_stats)
12888                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12889         else
12890                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12891 }
12892
/* Read the device's VPD (Vital Product Data) block.
 *
 * For a standard tg3 NVRAM image, locate the (possibly extended) VPD area
 * inside NVRAM and copy it out; otherwise fall back to reading VPD through
 * the PCI VPD capability.  On success returns an allocated buffer (caller
 * frees with kfree()) and stores its length in *vpdlen; returns NULL on
 * any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended-VPD
		 * entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: extract the block length (stored in
			 * 4-byte words) and its NVRAM address; `offset` is
			 * reused to hold that address from here on.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}

		/* No usable extended VPD: use the fixed legacy location. */
		if (!offset || !len) {
			offset = TG3_NVM_VPD_OFF;
			len = TG3_NVM_VPD_LEN;
		}

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return NULL;

		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
		*vpdlen = len;
	} else {
		/* Not a tg3 NVRAM image: read VPD via PCI config space. */
		buf = pci_vpd_alloc(tp->pdev, vpdlen);
		if (IS_ERR(buf))
			return NULL;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}
12953
12954 #define NVRAM_TEST_SIZE 0x100
12955 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12956 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12957 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12958 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12959 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12960 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12961 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12962 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12963
/* NVRAM self-test: read the image indicated by the magic word at offset 0
 * and verify its integrity.  Three layouts are handled: the standard
 * EEPROM image (CRC checksums plus VPD checksum), firmware self-boot
 * images (8-bit byte sum), and hardware self-boot images (per-byte
 * parity bits).  Returns 0 on success, a negative errno on read failure
 * or checksum mismatch.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;
	unsigned int len;

	/* Nothing to verify if the device has no NVRAM. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Choose how many bytes to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Self-boot format 1: size depends on the revision
			 * encoded in the magic word.
			 */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;	/* unknown format: skip the test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image into buf, one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Firmware self-boot image: the 8-bit sum over the image
		 * must come out to zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 carries 8 more. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte combined with its stored parity bit must
		 * yield odd parity; either mismatch direction fails.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD block's checksum, if present. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	err = pci_vpd_check_csum(buf, len);
	/* go on if no checksum found */
	if (err == 1)
		err = 0;
out:
	kfree(buf);
	return err;
}
13120
13121 #define TG3_SERDES_TIMEOUT_SEC  2
13122 #define TG3_COPPER_TIMEOUT_SEC  6
13123
13124 static int tg3_test_link(struct tg3 *tp)
13125 {
13126         int i, max;
13127
13128         if (!netif_running(tp->dev))
13129                 return -ENODEV;
13130
13131         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13132                 max = TG3_SERDES_TIMEOUT_SEC;
13133         else
13134                 max = TG3_COPPER_TIMEOUT_SEC;
13135
13136         for (i = 0; i < max; i++) {
13137                 if (tp->link_up)
13138                         return 0;
13139
13140                 if (msleep_interruptible(1000))
13141                         break;
13142         }
13143
13144         return -EIO;
13145 }
13146
/* Only test the commonly used registers */
/* Register self-test: for each table entry, write all-zeros and then
 * all-ones to the register and verify that read-only bits keep their
 * saved value while read/write bits take the written value.  Entries
 * are filtered by chip family flags.  Returns 0 on success, -EIO on
 * the first failing register (original value is restored either way).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;	/* register offset */
		u16 flags;	/* chip-family applicability, see TG3_FL_* */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* terminator */
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
13367
13368 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13369 {
13370         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13371         int i;
13372         u32 j;
13373
13374         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13375                 for (j = 0; j < len; j += 4) {
13376                         u32 val;
13377
13378                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13379                         tg3_read_mem(tp, offset + j, &val);
13380                         if (val != test_pattern[i])
13381                                 return -EIO;
13382                 }
13383         }
13384         return 0;
13385 }
13386
/* Memory self-test: run tg3_do_mem_test() over the set of internal
 * memory regions appropriate for this ASIC generation.  Each table is a
 * list of { offset, length } pairs terminated by offset 0xffffffff.
 * Returns 0 on success or the first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start address in on-chip memory */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table for this chip, newest families first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	/* Stop at the first region that fails the pattern test. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
13456
13457 #define TG3_TSO_MSS             500
13458
13459 #define TG3_TSO_IP_HDR_LEN      20
13460 #define TG3_TSO_TCP_HDR_LEN     20
13461 #define TG3_TSO_TCP_OPT_LEN     12
13462
/* Template packet header for the TSO loopback self-test, laid out as an
 * Ethernet type field followed by IP and TCP headers (lengths match the
 * TG3_TSO_*_LEN defines above: 20-byte IP header, 20-byte TCP header plus
 * 12 bytes of TCP options).  Addresses and ports appear to be dummy test
 * values; tg3_run_loopback() copies this template into the test frame and
 * fills in the IP total-length field.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
13479
/* Transmit one test frame of @pktsz bytes (shaped as a TSO burst when
 * @tso_loopback is true), then poll the return ring and verify every
 * payload byte came back unchanged through the currently configured
 * loopback path.  Returns 0 on success, -ENOMEM on allocation failure,
 * -EIO on any transmit/receive/verification failure.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Default to vector 0; with multiple vectors, RSS delivers rx on
	 * napi[1] and TSS transmits on napi[1].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC is our own address; source MAC is zeroed. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	/* Make sure the MAC accepts a frame of this size plus FCS. */
	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Lay down the canned IPv4+TCP headers right after the
		 * two MAC addresses.
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Expect one rx packet per MSS-sized segment. */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* HW TSO engines want a zero TCP checksum seed; without
		 * HW TSO the checksum-offload flag is set instead.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * layout this TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a predictable pattern so the receive
	 * side can verify it byte-for-byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&tp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	/* Force a coalescence pass so the rx producer index read below
	 * reflects the current state.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if the frame was never fully consumed by the tx side... */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	/* ...or the expected number of packets never arrived. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every returned descriptor, checking error bits, ring
	 * placement, length and payload.  @val tracks the expected
	 * pattern byte across TSO segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Small frames must land on the standard ring,
			 * larger ones on the jumbo ring.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
					DMA_FROM_DEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
13687
/* Per-frame-type failure bits accumulated into the ethtool self-test
 * results by tg3_test_loopback().
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
13695
/* Run MAC-, PHY- and (when @do_extlpbk) external loopback tests,
 * accumulating per-path failure bits into @data.  Returns 0 when every
 * executed loopback variant passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily clear the EEE capability flag for the duration of
	 * the test; it is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* With the interface down, report every path as failed. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		/* Internal PHY loopback: standard, TSO and jumbo frames. */
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13810
/* ethtool self-test entry point: runs the non-disruptive NVRAM and link
 * tests, and — when ETH_TEST_FL_OFFLINE is requested — halts the chip to
 * run register, memory, loopback and interrupt tests before restarting
 * it.  Per-test pass/fail results are written into @data.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A device in low-power state must be powered up first; if that
	 * fails, mark every test as failed and bail out.
	 */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online (non-disruptive) tests. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs with the full lock dropped. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13899
13900 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13901 {
13902         struct tg3 *tp = netdev_priv(dev);
13903         struct hwtstamp_config stmpconf;
13904
13905         if (!tg3_flag(tp, PTP_CAPABLE))
13906                 return -EOPNOTSUPP;
13907
13908         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13909                 return -EFAULT;
13910
13911         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13912             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13913                 return -ERANGE;
13914
13915         switch (stmpconf.rx_filter) {
13916         case HWTSTAMP_FILTER_NONE:
13917                 tp->rxptpctl = 0;
13918                 break;
13919         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13920                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13921                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13922                 break;
13923         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13924                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13925                                TG3_RX_PTP_CTL_SYNC_EVNT;
13926                 break;
13927         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13928                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13929                                TG3_RX_PTP_CTL_DELAY_REQ;
13930                 break;
13931         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13932                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13933                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13934                 break;
13935         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13936                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13937                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13938                 break;
13939         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13940                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13941                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13942                 break;
13943         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13944                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13945                                TG3_RX_PTP_CTL_SYNC_EVNT;
13946                 break;
13947         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13948                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13949                                TG3_RX_PTP_CTL_SYNC_EVNT;
13950                 break;
13951         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13952                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13953                                TG3_RX_PTP_CTL_SYNC_EVNT;
13954                 break;
13955         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13956                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13957                                TG3_RX_PTP_CTL_DELAY_REQ;
13958                 break;
13959         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13960                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13961                                TG3_RX_PTP_CTL_DELAY_REQ;
13962                 break;
13963         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13964                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13965                                TG3_RX_PTP_CTL_DELAY_REQ;
13966                 break;
13967         default:
13968                 return -ERANGE;
13969         }
13970
13971         if (netif_running(dev) && tp->rxptpctl)
13972                 tw32(TG3_RX_PTP_CTL,
13973                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13974
13975         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13976                 tg3_flag_set(tp, TX_TSTAMP_EN);
13977         else
13978                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13979
13980         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13981                 -EFAULT : 0;
13982 }
13983
13984 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13985 {
13986         struct tg3 *tp = netdev_priv(dev);
13987         struct hwtstamp_config stmpconf;
13988
13989         if (!tg3_flag(tp, PTP_CAPABLE))
13990                 return -EOPNOTSUPP;
13991
13992         stmpconf.flags = 0;
13993         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13994                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13995
13996         switch (tp->rxptpctl) {
13997         case 0:
13998                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13999                 break;
14000         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14001                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14002                 break;
14003         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14004                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14005                 break;
14006         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14007                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14008                 break;
14009         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14010                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14011                 break;
14012         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14013                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14014                 break;
14015         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14016                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14017                 break;
14018         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14019                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14020                 break;
14021         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14022                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14023                 break;
14024         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14025                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14026                 break;
14027         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14028                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14029                 break;
14030         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14031                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14032                 break;
14033         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14034                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14035                 break;
14036         default:
14037                 WARN_ON_ONCE(1);
14038                 return -ERANGE;
14039         }
14040
14041         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14042                 -EFAULT : 0;
14043 }
14044
/* net_device ioctl handler: MII register access (directly or via phylib)
 * and hardware timestamping configuration.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* When phylib manages the PHY, hand all MII ioctls to it. */
	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* SIOCGMIIPHY also reads the requested register, so
		 * fall into the read path below.
		 */
		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* PHY accesses are serialized under tp->lock. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
14109
14110 static int tg3_get_coalesce(struct net_device *dev,
14111                             struct ethtool_coalesce *ec,
14112                             struct kernel_ethtool_coalesce *kernel_coal,
14113                             struct netlink_ext_ack *extack)
14114 {
14115         struct tg3 *tp = netdev_priv(dev);
14116
14117         memcpy(ec, &tp->coal, sizeof(*ec));
14118         return 0;
14119 }
14120
14121 static int tg3_set_coalesce(struct net_device *dev,
14122                             struct ethtool_coalesce *ec,
14123                             struct kernel_ethtool_coalesce *kernel_coal,
14124                             struct netlink_ext_ack *extack)
14125 {
14126         struct tg3 *tp = netdev_priv(dev);
14127         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14128         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14129
14130         if (!tg3_flag(tp, 5705_PLUS)) {
14131                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14132                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14133                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14134                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14135         }
14136
14137         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14138             (!ec->rx_coalesce_usecs) ||
14139             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14140             (!ec->tx_coalesce_usecs) ||
14141             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14142             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14143             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14144             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14145             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14146             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14147             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14148             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14149                 return -EINVAL;
14150
14151         /* Only copy relevant parameters, ignore all others. */
14152         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14153         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14154         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14155         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14156         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14157         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14158         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14159         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14160         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14161
14162         if (netif_running(dev)) {
14163                 tg3_full_lock(tp, 0);
14164                 __tg3_set_coalesce(tp, &tp->coal);
14165                 tg3_full_unlock(tp);
14166         }
14167         return 0;
14168 }
14169
/* ethtool set_eee: validate and accept new Energy Efficient Ethernet
 * settings, then re-program EEE and reset the PHY if the interface is
 * up.  Direct changes to the advertisement mask are rejected.
 */
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	/* The advertisement mask may not be changed from user space. */
	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	/* Accept the new settings and mark them as user-configured. */
	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	/* If the interface is up, apply immediately: program EEE and
	 * reset the PHY, all under the full lock.
	 */
	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
14206
14207 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14208 {
14209         struct tg3 *tp = netdev_priv(dev);
14210
14211         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14212                 netdev_warn(tp->dev,
14213                             "Board does not support EEE!\n");
14214                 return -EOPNOTSUPP;
14215         }
14216
14217         *edata = tp->eee;
14218         return 0;
14219 }
14220
/* ethtool entry points exported for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
14262
14263 static void tg3_get_stats64(struct net_device *dev,
14264                             struct rtnl_link_stats64 *stats)
14265 {
14266         struct tg3 *tp = netdev_priv(dev);
14267
14268         spin_lock_bh(&tp->lock);
14269         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14270                 *stats = tp->net_stats_prev;
14271                 spin_unlock_bh(&tp->lock);
14272                 return;
14273         }
14274
14275         tg3_get_nstats(tp, stats);
14276         spin_unlock_bh(&tp->lock);
14277 }
14278
/* .ndo_set_rx_mode handler: push the new RX filter configuration to
 * the chip.  Nothing to do while the interface is down — the mode is
 * (re)applied when the device is brought up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
14290
14291 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14292                                int new_mtu)
14293 {
14294         dev->mtu = new_mtu;
14295
14296         if (new_mtu > ETH_DATA_LEN) {
14297                 if (tg3_flag(tp, 5780_CLASS)) {
14298                         netdev_update_features(dev);
14299                         tg3_flag_clear(tp, TSO_CAPABLE);
14300                 } else {
14301                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14302                 }
14303         } else {
14304                 if (tg3_flag(tp, 5780_CLASS)) {
14305                         tg3_flag_set(tp, TSO_CAPABLE);
14306                         netdev_update_features(dev);
14307                 }
14308                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14309         }
14310 }
14311
/* .ndo_change_mtu handler.  Changing the MTU on a running device
 * requires a full stop / halt / restart cycle because the RX ring
 * geometry depends on the frame size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	/* NOTE(review): second argument appears to request IRQ sync in
	 * tg3_full_lock() — confirm against its definition elsewhere in
	 * this file.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	/* Only restart the data path and PHY if the hardware came back;
	 * on failure the error is propagated to the caller.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
14357
/* net_device entry points for tg3 interfaces. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
14375
14376 static void tg3_get_eeprom_size(struct tg3 *tp)
14377 {
14378         u32 cursize, val, magic;
14379
14380         tp->nvram_size = EEPROM_CHIP_SIZE;
14381
14382         if (tg3_nvram_read(tp, 0, &magic) != 0)
14383                 return;
14384
14385         if ((magic != TG3_EEPROM_MAGIC) &&
14386             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14387             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14388                 return;
14389
14390         /*
14391          * Size the chip by reading offsets at increasing powers of two.
14392          * When we encounter our validation signature, we know the addressing
14393          * has wrapped around, and thus have our chip size.
14394          */
14395         cursize = 0x10;
14396
14397         while (cursize < tp->nvram_size) {
14398                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14399                         return;
14400
14401                 if (val == magic)
14402                         break;
14403
14404                 cursize <<= 1;
14405         }
14406
14407         tp->nvram_size = cursize;
14408 }
14409
14410 static void tg3_get_nvram_size(struct tg3 *tp)
14411 {
14412         u32 val;
14413
14414         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14415                 return;
14416
14417         /* Selfboot format */
14418         if (val != TG3_EEPROM_MAGIC) {
14419                 tg3_get_eeprom_size(tp);
14420                 return;
14421         }
14422
14423         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14424                 if (val != 0) {
14425                         /* This is confusing.  We want to operate on the
14426                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14427                          * call will read from NVRAM and byteswap the data
14428                          * according to the byteswapping settings for all
14429                          * other register accesses.  This ensures the data we
14430                          * want will always reside in the lower 16-bits.
14431                          * However, the data in NVRAM is in LE format, which
14432                          * means the data from the NVRAM read will always be
14433                          * opposite the endianness of the CPU.  The 16-bit
14434                          * byteswap then brings the data to CPU endianness.
14435                          */
14436                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14437                         return;
14438                 }
14439         }
14440         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14441 }
14442
/* Decode the NVRAM_CFG1 strapping for 5750/5780-era parts and record
 * the attached device's vendor, page size, and buffering mode.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface strapped: clear the compatibility
		 * bypass bit and write the config back.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Vendor field selects the JEDEC id and page geometry. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Older parts: assume a buffered Atmel AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14493
14494 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14495 {
14496         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14497         case FLASH_5752PAGE_SIZE_256:
14498                 tp->nvram_pagesize = 256;
14499                 break;
14500         case FLASH_5752PAGE_SIZE_512:
14501                 tp->nvram_pagesize = 512;
14502                 break;
14503         case FLASH_5752PAGE_SIZE_1K:
14504                 tp->nvram_pagesize = 1024;
14505                 break;
14506         case FLASH_5752PAGE_SIZE_2K:
14507                 tp->nvram_pagesize = 2048;
14508                 break;
14509         case FLASH_5752PAGE_SIZE_4K:
14510                 tp->nvram_pagesize = 4096;
14511                 break;
14512         case FLASH_5752PAGE_SIZE_264:
14513                 tp->nvram_pagesize = 264;
14514                 break;
14515         case FLASH_5752PAGE_SIZE_528:
14516                 tp->nvram_pagesize = 528;
14517                 break;
14518         }
14519 }
14520
/* Decode NVRAM_CFG1 strapping for 5752-class devices. */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14561
/* Decode NVRAM_CFG1 strapping for 5755-class devices.  When TPM write
 * protection is strapped, the usable NVRAM size is reduced.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* NOTE(review): 0x3e200/0x1f200 are the protected-mode
		 * usable sizes — presumably the upper region is reserved
		 * for the TPM; confirm against Broadcom documentation.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14617
/* Decode NVRAM_CFG1 strapping for 5787-class devices. */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: buffered access, no flash interface. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compatibility-bypass strap and write the
		 * config back to the chip.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14655
/* Decode NVRAM_CFG1 strapping for 5761-class devices.  The flash size
 * is taken from NVRAM_ADDR_LOCKOUT when TPM protection is active,
 * otherwise it is implied by the strapped part number.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected mode: the accessible size is whatever the
		 * lockout register allows.
		 */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows the strapped device density. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14730
14731 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14732 {
14733         tp->nvram_jedecnum = JEDEC_ATMEL;
14734         tg3_flag_set(tp, NVRAM_BUFFERED);
14735         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14736 }
14737
/* Decode NVRAM_CFG1 strapping for 57780-class devices.  Unknown straps
 * mark the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: buffered access, no flash interface. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows the strapped Atmel part density. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows the strapped ST part density. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages (Atmel DataFlash geometry) use address
	 * translation; power-of-two page sizes do not.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14809
14810
/* Decode NVRAM_CFG1 strapping for 5717-class devices.  Unknown straps
 * mark the device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* EEPROM parts: buffered access, no flash interface. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages (Atmel DataFlash geometry) use address
	 * translation; power-of-two page sizes do not.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14888
14889 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14890 {
14891         u32 nvcfg1, nvmpinstrp, nv_status;
14892
14893         nvcfg1 = tr32(NVRAM_CFG1);
14894         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14895
14896         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14897                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14898                         tg3_flag_set(tp, NO_NVRAM);
14899                         return;
14900                 }
14901
14902                 switch (nvmpinstrp) {
14903                 case FLASH_5762_MX25L_100:
14904                 case FLASH_5762_MX25L_200:
14905                 case FLASH_5762_MX25L_400:
14906                 case FLASH_5762_MX25L_800:
14907                 case FLASH_5762_MX25L_160_320:
14908                         tp->nvram_pagesize = 4096;
14909                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14910                         tg3_flag_set(tp, NVRAM_BUFFERED);
14911                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14912                         tg3_flag_set(tp, FLASH);
14913                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14914                         tp->nvram_size =
14915                                 (1 << (nv_status >> AUTOSENSE_DEVID &
14916                                                 AUTOSENSE_DEVID_MASK)
14917                                         << AUTOSENSE_SIZE_IN_MB);
14918                         return;
14919
14920                 case FLASH_5762_EEPROM_HD:
14921                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14922                         break;
14923                 case FLASH_5762_EEPROM_LD:
14924                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14925                         break;
14926                 case FLASH_5720VENDOR_M_ST_M45PE20:
14927                         /* This pinstrap supports multiple sizes, so force it
14928                          * to read the actual size from location 0xf0.
14929                          */
14930                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14931                         break;
14932                 }
14933         }
14934
14935         switch (nvmpinstrp) {
14936         case FLASH_5720_EEPROM_HD:
14937         case FLASH_5720_EEPROM_LD:
14938                 tp->nvram_jedecnum = JEDEC_ATMEL;
14939                 tg3_flag_set(tp, NVRAM_BUFFERED);
14940
14941                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14942                 tw32(NVRAM_CFG1, nvcfg1);
14943                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14944                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14945                 else
14946                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14947                 return;
14948         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14949         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14950         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14951         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14952         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14953         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14954         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14955         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14956         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14957         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14958         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14959         case FLASH_5720VENDOR_ATMEL_45USPT:
14960                 tp->nvram_jedecnum = JEDEC_ATMEL;
14961                 tg3_flag_set(tp, NVRAM_BUFFERED);
14962                 tg3_flag_set(tp, FLASH);
14963
14964                 switch (nvmpinstrp) {
14965                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14966                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14967                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14968                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14969                         break;
14970                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14971                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14972                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14973                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14974                         break;
14975                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14976                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14977                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14978                         break;
14979                 default:
14980                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14981                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14982                         break;
14983                 }
14984                 break;
14985         case FLASH_5720VENDOR_M_ST_M25PE10:
14986         case FLASH_5720VENDOR_M_ST_M45PE10:
14987         case FLASH_5720VENDOR_A_ST_M25PE10:
14988         case FLASH_5720VENDOR_A_ST_M45PE10:
14989         case FLASH_5720VENDOR_M_ST_M25PE20:
14990         case FLASH_5720VENDOR_M_ST_M45PE20:
14991         case FLASH_5720VENDOR_A_ST_M25PE20:
14992         case FLASH_5720VENDOR_A_ST_M45PE20:
14993         case FLASH_5720VENDOR_M_ST_M25PE40:
14994         case FLASH_5720VENDOR_M_ST_M45PE40:
14995         case FLASH_5720VENDOR_A_ST_M25PE40:
14996         case FLASH_5720VENDOR_A_ST_M45PE40:
14997         case FLASH_5720VENDOR_M_ST_M25PE80:
14998         case FLASH_5720VENDOR_M_ST_M45PE80:
14999         case FLASH_5720VENDOR_A_ST_M25PE80:
15000         case FLASH_5720VENDOR_A_ST_M45PE80:
15001         case FLASH_5720VENDOR_ST_25USPT:
15002         case FLASH_5720VENDOR_ST_45USPT:
15003                 tp->nvram_jedecnum = JEDEC_ST;
15004                 tg3_flag_set(tp, NVRAM_BUFFERED);
15005                 tg3_flag_set(tp, FLASH);
15006
15007                 switch (nvmpinstrp) {
15008                 case FLASH_5720VENDOR_M_ST_M25PE20:
15009                 case FLASH_5720VENDOR_M_ST_M45PE20:
15010                 case FLASH_5720VENDOR_A_ST_M25PE20:
15011                 case FLASH_5720VENDOR_A_ST_M45PE20:
15012                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15013                         break;
15014                 case FLASH_5720VENDOR_M_ST_M25PE40:
15015                 case FLASH_5720VENDOR_M_ST_M45PE40:
15016                 case FLASH_5720VENDOR_A_ST_M25PE40:
15017                 case FLASH_5720VENDOR_A_ST_M45PE40:
15018                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15019                         break;
15020                 case FLASH_5720VENDOR_M_ST_M25PE80:
15021                 case FLASH_5720VENDOR_M_ST_M45PE80:
15022                 case FLASH_5720VENDOR_A_ST_M25PE80:
15023                 case FLASH_5720VENDOR_A_ST_M45PE80:
15024                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15025                         break;
15026                 default:
15027                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15028                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15029                         break;
15030                 }
15031                 break;
15032         default:
15033                 tg3_flag_set(tp, NO_NVRAM);
15034                 return;
15035         }
15036
15037         tg3_nvram_get_pagesize(tp, nvcfg1);
15038         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15039                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15040
15041         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15042                 u32 val;
15043
15044                 if (tg3_nvram_read(tp, 0, &val))
15045                         return;
15046
15047                 if (val != TG3_EEPROM_MAGIC &&
15048                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15049                         tg3_flag_set(tp, NO_NVRAM);
15050         }
15051 }
15052
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Detect the attached non-volatile storage: reset the serial-EEPROM
 * state machine, enable seeprom access, then dispatch to the
 * ASIC-specific helper that fills in tp->nvram_size / pagesize and
 * the NVRAM-related TG3_FLAG bits.  5700/5701 (and SSB cores) fall
 * back to plain serial EEPROM (or no storage at all).
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and program the
	 * default clock period for serial EEPROM transactions.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with firmware; serialize access. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Zero first so we can tell below whether the per-ASIC
		 * helper determined the size itself.
		 */
		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Helper left the size undetermined; probe it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: serial EEPROM only, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15127
/* One entry of the hardcoded board table: maps a PCI subsystem
 * vendor/device ID pair to the PHY ID known to be on that board.
 * phy_id of 0 means no copper PHY ID applies (tg3_phy_probe treats
 * an unset/zero phy_id as a serdes PHY).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;				/* TG3_PHY_ID_* value or 0 */
};
15132
/* Fallback PHY ID table, consulted by tg3_phy_probe() when neither
 * the PHY hardware nor the EEPROM yields a usable PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15196
15197 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15198 {
15199         int i;
15200
15201         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15202                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15203                      tp->pdev->subsystem_vendor) &&
15204                     (subsys_id_to_phy_id[i].subsys_devid ==
15205                      tp->pdev->subsystem_device))
15206                         return &subsys_id_to_phy_id[i];
15207         }
15208         return NULL;
15209 }
15210
/* Pull the board configuration that bootcode placed in NIC SRAM
 * (and, on 5906, in the VCPU shadow register) and translate it into
 * driver state: PHY ID, LED mode, WOL capability, ASF/APE enables
 * and assorted PHY workaround flags.  Finally sync the WOL state to
 * the PM core via device_set_wakeup_*().
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config through the VCPU shadow
		 * register instead of the NIC SRAM area below.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust the SRAM config area if bootcode left its
	 * signature there; otherwise keep the defaults set above.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane
		 * bootcode version (0 < ver < 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's
		 * internal TG3_PHY_ID_* layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ chips keep extended LED mode bits in CFG_2. */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 lack the extra PHY mode bits. */
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards still allow writes
			 * despite the WP bit being set.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards are only WOL capable if bootcode
		 * explicitly says so.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Propagate the final WOL capability/state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15427
/* Read one 32-bit word from the APE OTP region.
 * @offset: word offset into the OTP region (scaled by 8 to form the
 *          device address programmed into TG3_APE_OTP_ADDR)
 * @val:    output; written only when the read command completes
 * Returns 0 on success, -EBUSY if the OTP engine does not signal
 * completion within ~1ms, or the tg3_nvram_lock() error.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	/* Serialize against firmware NVRAM/OTP access. */
	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Program the address, then kick off a read command. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);	/* flush the write */
	udelay(10);

	/* Poll for completion, up to 100 * 10us. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15460
/* Issue one command to the (non-APE) OTP controller and wait for it
 * to finish.  Returns 0 on completion, -EBUSY on timeout (~1ms).
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	/* Pulse the START bit, then leave the bare command latched. */
	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
15479
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns the merged 32-bit gphy config, or 0 if any OTP command
 * (init or either read) times out.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP access through the GRC registers. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First word: low half carries the top 16 config bits. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second word: high half carries the bottom 16 config bits. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15509
15510 static void tg3_phy_init_link_config(struct tg3 *tp)
15511 {
15512         u32 adv = ADVERTISED_Autoneg;
15513
15514         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15515                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15516                         adv |= ADVERTISED_1000baseT_Half;
15517                 adv |= ADVERTISED_1000baseT_Full;
15518         }
15519
15520         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15521                 adv |= ADVERTISED_100baseT_Half |
15522                        ADVERTISED_100baseT_Full |
15523                        ADVERTISED_10baseT_Half |
15524                        ADVERTISED_10baseT_Full |
15525                        ADVERTISED_TP;
15526         else
15527                 adv |= ADVERTISED_FIBRE;
15528
15529         tp->link_config.advertising = adv;
15530         tp->link_config.speed = SPEED_UNKNOWN;
15531         tp->link_config.duplex = DUPLEX_UNKNOWN;
15532         tp->link_config.autoneg = AUTONEG_ENABLE;
15533         tp->link_config.active_speed = SPEED_UNKNOWN;
15534         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15535
15536         tp->old_link = -1;
15537 }
15538
/* Determine which PHY is attached and initialize PHY-related state.
 * The PHY ID is taken, in order of preference, from: the PHY hardware
 * itself (unless ASF/APE firmware owns the PHY), the EEPROM value set
 * by tg3_get_eeprom_hw_cfg(), or the hardcoded subsystem-ID table.
 * Also sets flow control defaults, EEE capability, and the default
 * link configuration.  Returns 0 or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates its PHY with APE firmware
	 * through its own APE lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without firmware or serdes involvement these power-down
	 * related flags do not apply; clear them.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack MII PHYSID1/PHYSID2 into the driver's
		 * internal TG3_PHY_ID_* layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			/* A zero table entry (or unknown SSB PHY)
			 * means a serdes PHY.
			 */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE capability and defaults on copper PHYs of the
	 * chip generations that support it.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* If no firmware owns the PHY and link is down, reset the PHY
	 * and (re)start autonegotiation with our advertised modes.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here; long-standing behavior in this
		 * driver — do not "simplify" without hardware testing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15693
/* Derive tp->board_part_number (and, for Dell boards, a piece of
 * tp->fw_ver) from the PCI VPD block stored in NVRAM.  When no usable
 * VPD is found, fall back to a fixed name keyed off the ASIC revision
 * and PCI device ID, or "none" if nothing matches.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Dell boards (manufacturer ID "1028") carry the boot code
	 * version in the VENDOR0 keyword; seed tp->fw_ver with it.
	 */
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	/* Part number must fit the fixed-size destination buffer. */
	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No part number in VPD: synthesize one from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15789
15790 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15791 {
15792         u32 val;
15793
15794         if (tg3_nvram_read(tp, offset, &val) ||
15795             (val & 0xfc000000) != 0x0c000000 ||
15796             tg3_nvram_read(tp, offset + 4, &val) ||
15797             val != 0)
15798                 return 0;
15799
15800         return 1;
15801 }
15802
/* Append the NVRAM boot code version to tp->fw_ver.  Newer boot code
 * images embed a 16-byte version string; older images only carry a
 * packed major/minor revision word in the NVRAM directory.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the boot code image pointer, word 0x4 the
	 * image start address.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000 signature followed by a zero word marks the
	 * newer image format with an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever version info is already present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need 16 bytes of headroom for the embedded string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15854
15855 static void tg3_read_hwsb_ver(struct tg3 *tp)
15856 {
15857         u32 val, major, minor;
15858
15859         /* Use native endian representation */
15860         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15861                 return;
15862
15863         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15864                 TG3_NVM_HWSB_CFG1_MAJSFT;
15865         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15866                 TG3_NVM_HWSB_CFG1_MINSFT;
15867
15868         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15869 }
15870
/* Append the self-boot firmware version to tp->fw_ver.  @val is the
 * NVRAM magic word already read by the caller; only format-1 images
 * with a known revision carry an extractable version.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each image revision stores its edition header (major/minor/
	 * build) word at a different offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is two decimal digits, build maps to a
	 * single letter 'a'..'z' (1..26) below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero build numbers are appended as a letter suffix. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15925
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its 16-byte version string to tp->fw_ver, separated by ", ".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for an ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed image base; later parts store the
	 * base in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): these two writes are not bounds-checked against
	 * TG3_VER_SIZE — this appears to rely on headroom left by the
	 * earlier version strings; confirm callers guarantee it.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 bytes of version text, truncating at the end of
	 * the buffer (the caller NUL-terminates fw_ver afterwards).
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15977
15978 static void tg3_probe_ncsi(struct tg3 *tp)
15979 {
15980         u32 apedata;
15981
15982         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15983         if (apedata != APE_SEG_SIG_MAGIC)
15984                 return;
15985
15986         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15987         if (!(apedata & APE_FW_STATUS_READY))
15988                 return;
15989
15990         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15991                 tg3_flag_set(tp, APE_HAS_NCSI);
15992 }
15993
15994 static void tg3_read_dash_ver(struct tg3 *tp)
15995 {
15996         int vlen;
15997         u32 apedata;
15998         char *fwtype;
15999
16000         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16001
16002         if (tg3_flag(tp, APE_HAS_NCSI))
16003                 fwtype = "NCSI";
16004         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16005                 fwtype = "SMASH";
16006         else
16007                 fwtype = "DASH";
16008
16009         vlen = strlen(tp->fw_ver);
16010
16011         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16012                  fwtype,
16013                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16014                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16015                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16016                  (apedata & APE_FW_VERSION_BLDMSK));
16017 }
16018
16019 static void tg3_read_otp_ver(struct tg3 *tp)
16020 {
16021         u32 val, val2;
16022
16023         if (tg3_asic_rev(tp) != ASIC_REV_5762)
16024                 return;
16025
16026         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16027             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16028             TG3_OTP_MAGIC0_VALID(val)) {
16029                 u64 val64 = (u64) val << 32 | val2;
16030                 u32 ver = 0;
16031                 int i, vlen;
16032
16033                 for (i = 0; i < 7; i++) {
16034                         if ((val64 & 0xff) == 0)
16035                                 break;
16036                         ver = val64 & 0xff;
16037                         val64 >>= 8;
16038                 }
16039                 vlen = strlen(tp->fw_ver);
16040                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16041         }
16042 }
16043
/* Build the complete firmware version string in tp->fw_ver by
 * dispatching on the NVRAM magic word, then appending management
 * firmware info (DASH/NCSI or ASF) when applicable.
 */
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* A non-empty fw_ver means tg3_read_vpd() already supplied a
	 * vendor version; don't overwrite it with firmware-derived info.
	 */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Dispatch on the NVRAM image type encoded in the magic word. */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination after all the appends above. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
16080
16081 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16082 {
16083         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16084                 return TG3_RX_RET_MAX_SIZE_5717;
16085         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16086                 return TG3_RX_RET_MAX_SIZE_5700;
16087         else
16088                 return TG3_RX_RET_MAX_SIZE_5705;
16089 }
16090
/* Host bridges on this list are known to reorder PCI writes.
 * NOTE(review): presumably consulted elsewhere to enable a
 * write-reorder workaround — usage is outside this view; confirm.
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
16097
/* For dual-port devices (5704/5714), find the sibling PCI function
 * sharing this device's slot.  Returns tp->pdev itself when no peer
 * exists (e.g. a 5704 configured in single-port mode).  The returned
 * pointer is deliberately not reference-counted — see comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* Mask off the function bits to get the slot's base devfn. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* NOTE(review): the non-peer reference is dropped here;
		 * if the loop finishes without breaking, 'peer' holds the
		 * last (already-put) result — relies on it being NULL or
		 * tp->pdev in practice; confirm.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
16125
/* Decode the chip revision from @misc_ctrl_reg (or, for newer parts,
 * from a product-ID config register) into tp->pci_chip_rev_id, then
 * set the cascading family flags (5705_PLUS ⊇ 5750_PLUS ⊇ 5755_PLUS,
 * etc.) that the rest of the driver keys off.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the product-ID register by device family. */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
16213
16214 static bool tg3_10_100_only_device(struct tg3 *tp,
16215                                    const struct pci_device_id *ent)
16216 {
16217         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16218
16219         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16220              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16221             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16222                 return true;
16223
16224         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16225                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16226                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16227                                 return true;
16228                 } else {
16229                         return true;
16230                 }
16231         }
16232
16233         return false;
16234 }
16235
16236 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16237 {
16238         u32 misc_ctrl_reg;
16239         u32 pci_state_reg, grc_misc_cfg;
16240         u32 val;
16241         u16 pci_cmd;
16242         int err;
16243
16244         /* Force memory write invalidate off.  If we leave it on,
16245          * then on 5700_BX chips we have to enable a workaround.
16246          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16247          * to match the cacheline size.  The Broadcom driver have this
16248          * workaround but turns MWI off all the times so never uses
16249          * it.  This seems to suggest that the workaround is insufficient.
16250          */
16251         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16252         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16253         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16254
16255         /* Important! -- Make sure register accesses are byteswapped
16256          * correctly.  Also, for those chips that require it, make
16257          * sure that indirect register accesses are enabled before
16258          * the first operation.
16259          */
16260         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16261                               &misc_ctrl_reg);
16262         tp->misc_host_ctrl |= (misc_ctrl_reg &
16263                                MISC_HOST_CTRL_CHIPREV);
16264         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16265                                tp->misc_host_ctrl);
16266
16267         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16268
16269         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16270          * we need to disable memory and use config. cycles
16271          * only to access all registers. The 5702/03 chips
16272          * can mistakenly decode the special cycles from the
16273          * ICH chipsets as memory write cycles, causing corruption
16274          * of register and memory space. Only certain ICH bridges
16275          * will drive special cycles with non-zero data during the
16276          * address phase which can fall within the 5703's address
16277          * range. This is not an ICH bug as the PCI spec allows
16278          * non-zero address during special cycles. However, only
16279          * these ICH bridges are known to drive non-zero addresses
16280          * during special cycles.
16281          *
16282          * Since special cycles do not cross PCI bridges, we only
16283          * enable this workaround if the 5703 is on the secondary
16284          * bus of these ICH bridges.
16285          */
16286         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16287             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16288                 static struct tg3_dev_id {
16289                         u32     vendor;
16290                         u32     device;
16291                         u32     rev;
16292                 } ich_chipsets[] = {
16293                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16294                           PCI_ANY_ID },
16295                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16296                           PCI_ANY_ID },
16297                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16298                           0xa },
16299                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16300                           PCI_ANY_ID },
16301                         { },
16302                 };
16303                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16304                 struct pci_dev *bridge = NULL;
16305
16306                 while (pci_id->vendor != 0) {
16307                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16308                                                 bridge);
16309                         if (!bridge) {
16310                                 pci_id++;
16311                                 continue;
16312                         }
16313                         if (pci_id->rev != PCI_ANY_ID) {
16314                                 if (bridge->revision > pci_id->rev)
16315                                         continue;
16316                         }
16317                         if (bridge->subordinate &&
16318                             (bridge->subordinate->number ==
16319                              tp->pdev->bus->number)) {
16320                                 tg3_flag_set(tp, ICH_WORKAROUND);
16321                                 pci_dev_put(bridge);
16322                                 break;
16323                         }
16324                 }
16325         }
16326
16327         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16328                 static struct tg3_dev_id {
16329                         u32     vendor;
16330                         u32     device;
16331                 } bridge_chipsets[] = {
16332                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16333                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16334                         { },
16335                 };
16336                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16337                 struct pci_dev *bridge = NULL;
16338
16339                 while (pci_id->vendor != 0) {
16340                         bridge = pci_get_device(pci_id->vendor,
16341                                                 pci_id->device,
16342                                                 bridge);
16343                         if (!bridge) {
16344                                 pci_id++;
16345                                 continue;
16346                         }
16347                         if (bridge->subordinate &&
16348                             (bridge->subordinate->number <=
16349                              tp->pdev->bus->number) &&
16350                             (bridge->subordinate->busn_res.end >=
16351                              tp->pdev->bus->number)) {
16352                                 tg3_flag_set(tp, 5701_DMA_BUG);
16353                                 pci_dev_put(bridge);
16354                                 break;
16355                         }
16356                 }
16357         }
16358
16359         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16360          * DMA addresses > 40-bit. This bridge may have other additional
16361          * 57xx devices behind it in some 4-port NIC designs for example.
16362          * Any tg3 device found behind the bridge will also need the 40-bit
16363          * DMA workaround.
16364          */
16365         if (tg3_flag(tp, 5780_CLASS)) {
16366                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16367                 tp->msi_cap = tp->pdev->msi_cap;
16368         } else {
16369                 struct pci_dev *bridge = NULL;
16370
16371                 do {
16372                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16373                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16374                                                 bridge);
16375                         if (bridge && bridge->subordinate &&
16376                             (bridge->subordinate->number <=
16377                              tp->pdev->bus->number) &&
16378                             (bridge->subordinate->busn_res.end >=
16379                              tp->pdev->bus->number)) {
16380                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16381                                 pci_dev_put(bridge);
16382                                 break;
16383                         }
16384                 } while (bridge);
16385         }
16386
16387         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16388             tg3_asic_rev(tp) == ASIC_REV_5714)
16389                 tp->pdev_peer = tg3_find_peer(tp);
16390
16391         /* Determine TSO capabilities */
16392         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16393                 ; /* Do nothing. HW bug. */
16394         else if (tg3_flag(tp, 57765_PLUS))
16395                 tg3_flag_set(tp, HW_TSO_3);
16396         else if (tg3_flag(tp, 5755_PLUS) ||
16397                  tg3_asic_rev(tp) == ASIC_REV_5906)
16398                 tg3_flag_set(tp, HW_TSO_2);
16399         else if (tg3_flag(tp, 5750_PLUS)) {
16400                 tg3_flag_set(tp, HW_TSO_1);
16401                 tg3_flag_set(tp, TSO_BUG);
16402                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16403                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16404                         tg3_flag_clear(tp, TSO_BUG);
16405         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16406                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16407                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16408                 tg3_flag_set(tp, FW_TSO);
16409                 tg3_flag_set(tp, TSO_BUG);
16410                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16411                         tp->fw_needed = FIRMWARE_TG3TSO5;
16412                 else
16413                         tp->fw_needed = FIRMWARE_TG3TSO;
16414         }
16415
16416         /* Selectively allow TSO based on operating conditions */
16417         if (tg3_flag(tp, HW_TSO_1) ||
16418             tg3_flag(tp, HW_TSO_2) ||
16419             tg3_flag(tp, HW_TSO_3) ||
16420             tg3_flag(tp, FW_TSO)) {
16421                 /* For firmware TSO, assume ASF is disabled.
16422                  * We'll disable TSO later if we discover ASF
16423                  * is enabled in tg3_get_eeprom_hw_cfg().
16424                  */
16425                 tg3_flag_set(tp, TSO_CAPABLE);
16426         } else {
16427                 tg3_flag_clear(tp, TSO_CAPABLE);
16428                 tg3_flag_clear(tp, TSO_BUG);
16429                 tp->fw_needed = NULL;
16430         }
16431
16432         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16433                 tp->fw_needed = FIRMWARE_TG3;
16434
16435         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16436                 tp->fw_needed = FIRMWARE_TG357766;
16437
16438         tp->irq_max = 1;
16439
16440         if (tg3_flag(tp, 5750_PLUS)) {
16441                 tg3_flag_set(tp, SUPPORT_MSI);
16442                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16443                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16444                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16445                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16446                      tp->pdev_peer == tp->pdev))
16447                         tg3_flag_clear(tp, SUPPORT_MSI);
16448
16449                 if (tg3_flag(tp, 5755_PLUS) ||
16450                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16451                         tg3_flag_set(tp, 1SHOT_MSI);
16452                 }
16453
16454                 if (tg3_flag(tp, 57765_PLUS)) {
16455                         tg3_flag_set(tp, SUPPORT_MSIX);
16456                         tp->irq_max = TG3_IRQ_MAX_VECS;
16457                 }
16458         }
16459
16460         tp->txq_max = 1;
16461         tp->rxq_max = 1;
16462         if (tp->irq_max > 1) {
16463                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16464                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16465
16466                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16467                     tg3_asic_rev(tp) == ASIC_REV_5720)
16468                         tp->txq_max = tp->irq_max - 1;
16469         }
16470
16471         if (tg3_flag(tp, 5755_PLUS) ||
16472             tg3_asic_rev(tp) == ASIC_REV_5906)
16473                 tg3_flag_set(tp, SHORT_DMA_BUG);
16474
16475         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16476                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16477
16478         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16479             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16480             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16481             tg3_asic_rev(tp) == ASIC_REV_5762)
16482                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16483
16484         if (tg3_flag(tp, 57765_PLUS) &&
16485             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16486                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16487
16488         if (!tg3_flag(tp, 5705_PLUS) ||
16489             tg3_flag(tp, 5780_CLASS) ||
16490             tg3_flag(tp, USE_JUMBO_BDFLAG))
16491                 tg3_flag_set(tp, JUMBO_CAPABLE);
16492
16493         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16494                               &pci_state_reg);
16495
16496         if (pci_is_pcie(tp->pdev)) {
16497                 u16 lnkctl;
16498
16499                 tg3_flag_set(tp, PCI_EXPRESS);
16500
16501                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16502                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16503                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16504                                 tg3_flag_clear(tp, HW_TSO_2);
16505                                 tg3_flag_clear(tp, TSO_CAPABLE);
16506                         }
16507                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16508                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16509                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16510                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16511                                 tg3_flag_set(tp, CLKREQ_BUG);
16512                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16513                         tg3_flag_set(tp, L1PLLPD_EN);
16514                 }
16515         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16516                 /* BCM5785 devices are effectively PCIe devices, and should
16517                  * follow PCIe codepaths, but do not have a PCIe capabilities
16518                  * section.
16519                  */
16520                 tg3_flag_set(tp, PCI_EXPRESS);
16521         } else if (!tg3_flag(tp, 5705_PLUS) ||
16522                    tg3_flag(tp, 5780_CLASS)) {
16523                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16524                 if (!tp->pcix_cap) {
16525                         dev_err(&tp->pdev->dev,
16526                                 "Cannot find PCI-X capability, aborting\n");
16527                         return -EIO;
16528                 }
16529
16530                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16531                         tg3_flag_set(tp, PCIX_MODE);
16532         }
16533
16534         /* If we have an AMD 762 or VIA K8T800 chipset, write
16535          * reordering to the mailbox registers done by the host
16536          * controller can cause major troubles.  We read back from
16537          * every mailbox register write to force the writes to be
16538          * posted to the chip in order.
16539          */
16540         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16541             !tg3_flag(tp, PCI_EXPRESS))
16542                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16543
16544         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16545                              &tp->pci_cacheline_sz);
16546         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16547                              &tp->pci_lat_timer);
16548         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16549             tp->pci_lat_timer < 64) {
16550                 tp->pci_lat_timer = 64;
16551                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16552                                       tp->pci_lat_timer);
16553         }
16554
16555         /* Important! -- It is critical that the PCI-X hw workaround
16556          * situation is decided before the first MMIO register access.
16557          */
16558         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16559                 /* 5700 BX chips need to have their TX producer index
16560                  * mailboxes written twice to workaround a bug.
16561                  */
16562                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16563
16564                 /* If we are in PCI-X mode, enable register write workaround.
16565                  *
16566                  * The workaround is to use indirect register accesses
16567                  * for all chip writes not to mailbox registers.
16568                  */
16569                 if (tg3_flag(tp, PCIX_MODE)) {
16570                         u32 pm_reg;
16571
16572                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16573
16574                         /* The chip can have it's power management PCI config
16575                          * space registers clobbered due to this bug.
16576                          * So explicitly force the chip into D0 here.
16577                          */
16578                         pci_read_config_dword(tp->pdev,
16579                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16580                                               &pm_reg);
16581                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16582                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16583                         pci_write_config_dword(tp->pdev,
16584                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16585                                                pm_reg);
16586
16587                         /* Also, force SERR#/PERR# in PCI command. */
16588                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16589                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16590                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16591                 }
16592         }
16593
16594         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16595                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16596         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16597                 tg3_flag_set(tp, PCI_32BIT);
16598
16599         /* Chip-specific fixup from Broadcom driver */
16600         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16601             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16602                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16603                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16604         }
16605
16606         /* Default fast path register access methods */
16607         tp->read32 = tg3_read32;
16608         tp->write32 = tg3_write32;
16609         tp->read32_mbox = tg3_read32;
16610         tp->write32_mbox = tg3_write32;
16611         tp->write32_tx_mbox = tg3_write32;
16612         tp->write32_rx_mbox = tg3_write32;
16613
16614         /* Various workaround register access methods */
16615         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16616                 tp->write32 = tg3_write_indirect_reg32;
16617         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16618                  (tg3_flag(tp, PCI_EXPRESS) &&
16619                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16620                 /*
16621                  * Back to back register writes can cause problems on these
16622                  * chips, the workaround is to read back all reg writes
16623                  * except those to mailbox regs.
16624                  *
16625                  * See tg3_write_indirect_reg32().
16626                  */
16627                 tp->write32 = tg3_write_flush_reg32;
16628         }
16629
16630         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16631                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16632                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16633                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16634         }
16635
16636         if (tg3_flag(tp, ICH_WORKAROUND)) {
16637                 tp->read32 = tg3_read_indirect_reg32;
16638                 tp->write32 = tg3_write_indirect_reg32;
16639                 tp->read32_mbox = tg3_read_indirect_mbox;
16640                 tp->write32_mbox = tg3_write_indirect_mbox;
16641                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16642                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16643
16644                 iounmap(tp->regs);
16645                 tp->regs = NULL;
16646
16647                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16648                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16649                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16650         }
16651         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16652                 tp->read32_mbox = tg3_read32_mbox_5906;
16653                 tp->write32_mbox = tg3_write32_mbox_5906;
16654                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16655                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16656         }
16657
16658         if (tp->write32 == tg3_write_indirect_reg32 ||
16659             (tg3_flag(tp, PCIX_MODE) &&
16660              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16661               tg3_asic_rev(tp) == ASIC_REV_5701)))
16662                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16663
16664         /* The memory arbiter has to be enabled in order for SRAM accesses
16665          * to succeed.  Normally on powerup the tg3 chip firmware will make
16666          * sure it is enabled, but other entities such as system netboot
16667          * code might disable it.
16668          */
16669         val = tr32(MEMARB_MODE);
16670         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16671
16672         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16673         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16674             tg3_flag(tp, 5780_CLASS)) {
16675                 if (tg3_flag(tp, PCIX_MODE)) {
16676                         pci_read_config_dword(tp->pdev,
16677                                               tp->pcix_cap + PCI_X_STATUS,
16678                                               &val);
16679                         tp->pci_fn = val & 0x7;
16680                 }
16681         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16682                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16683                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16684                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16685                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16686                         val = tr32(TG3_CPMU_STATUS);
16687
16688                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16689                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16690                 else
16691                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16692                                      TG3_CPMU_STATUS_FSHFT_5719;
16693         }
16694
16695         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16696                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16697                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16698         }
16699
16700         /* Get eeprom hw config before calling tg3_set_power_state().
16701          * In particular, the TG3_FLAG_IS_NIC flag must be
16702          * determined before calling tg3_set_power_state() so that
16703          * we know whether or not to switch out of Vaux power.
16704          * When the flag is set, it means that GPIO1 is used for eeprom
16705          * write protect and also implies that it is a LOM where GPIOs
16706          * are not used to switch power.
16707          */
16708         tg3_get_eeprom_hw_cfg(tp);
16709
16710         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16711                 tg3_flag_clear(tp, TSO_CAPABLE);
16712                 tg3_flag_clear(tp, TSO_BUG);
16713                 tp->fw_needed = NULL;
16714         }
16715
16716         if (tg3_flag(tp, ENABLE_APE)) {
16717                 /* Allow reads and writes to the
16718                  * APE register and memory space.
16719                  */
16720                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16721                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16722                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16723                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16724                                        pci_state_reg);
16725
16726                 tg3_ape_lock_init(tp);
16727                 tp->ape_hb_interval =
16728                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16729         }
16730
16731         /* Set up tp->grc_local_ctrl before calling
16732          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16733          * will bring 5700's external PHY out of reset.
16734          * It is also used as eeprom write protect on LOMs.
16735          */
16736         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16737         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16738             tg3_flag(tp, EEPROM_WRITE_PROT))
16739                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16740                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16741         /* Unused GPIO3 must be driven as output on 5752 because there
16742          * are no pull-up resistors on unused GPIO pins.
16743          */
16744         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16745                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16746
16747         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16748             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16749             tg3_flag(tp, 57765_CLASS))
16750                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16751
16752         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16753             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16754                 /* Turn off the debug UART. */
16755                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16756                 if (tg3_flag(tp, IS_NIC))
16757                         /* Keep VMain power. */
16758                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16759                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16760         }
16761
16762         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16763                 tp->grc_local_ctrl |=
16764                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16765
16766         /* Switch out of Vaux if it is a NIC */
16767         tg3_pwrsrc_switch_to_vmain(tp);
16768
16769         /* Derive initial jumbo mode from MTU assigned in
16770          * ether_setup() via the alloc_etherdev() call
16771          */
16772         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16773                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16774
16775         /* Determine WakeOnLan speed to use. */
16776         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16777             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16778             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16779             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16780                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16781         } else {
16782                 tg3_flag_set(tp, WOL_SPEED_100MB);
16783         }
16784
16785         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16786                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16787
16788         /* A few boards don't want Ethernet@WireSpeed phy feature */
16789         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16790             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16791              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16792              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16793             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16794             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16795                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16796
16797         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16798             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16799                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16800         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16801                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16802
16803         if (tg3_flag(tp, 5705_PLUS) &&
16804             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16805             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16806             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16807             !tg3_flag(tp, 57765_PLUS)) {
16808                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16809                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16810                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16811                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16812                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16813                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16814                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16815                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16816                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16817                 } else
16818                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16819         }
16820
16821         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16822             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16823                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16824                 if (tp->phy_otp == 0)
16825                         tp->phy_otp = TG3_OTP_DEFAULT;
16826         }
16827
16828         if (tg3_flag(tp, CPMU_PRESENT))
16829                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16830         else
16831                 tp->mi_mode = MAC_MI_MODE_BASE;
16832
16833         tp->coalesce_mode = 0;
16834         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16835             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16836                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16837
16838         /* Set these bits to enable statistics workaround. */
16839         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16840             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16841             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16842             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16843                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16844                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16845         }
16846
16847         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16848             tg3_asic_rev(tp) == ASIC_REV_57780)
16849                 tg3_flag_set(tp, USE_PHYLIB);
16850
16851         err = tg3_mdio_init(tp);
16852         if (err)
16853                 return err;
16854
16855         /* Initialize data/descriptor byte/word swapping. */
16856         val = tr32(GRC_MODE);
16857         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16858             tg3_asic_rev(tp) == ASIC_REV_5762)
16859                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16860                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16861                         GRC_MODE_B2HRX_ENABLE |
16862                         GRC_MODE_HTX2B_ENABLE |
16863                         GRC_MODE_HOST_STACKUP);
16864         else
16865                 val &= GRC_MODE_HOST_STACKUP;
16866
16867         tw32(GRC_MODE, val | tp->grc_mode);
16868
16869         tg3_switch_clocks(tp);
16870
16871         /* Clear this out for sanity. */
16872         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16873
16874         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16875         tw32(TG3PCI_REG_BASE_ADDR, 0);
16876
16877         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16878                               &pci_state_reg);
16879         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16880             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16881                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16882                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16883                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16884                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16885                         void __iomem *sram_base;
16886
16887                         /* Write some dummy words into the SRAM status block
16888                          * area, see if it reads back correctly.  If the return
16889                          * value is bad, force enable the PCIX workaround.
16890                          */
16891                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16892
16893                         writel(0x00000000, sram_base);
16894                         writel(0x00000000, sram_base + 4);
16895                         writel(0xffffffff, sram_base + 4);
16896                         if (readl(sram_base) != 0x00000000)
16897                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16898                 }
16899         }
16900
16901         udelay(50);
16902         tg3_nvram_init(tp);
16903
16904         /* If the device has an NVRAM, no need to load patch firmware */
16905         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16906             !tg3_flag(tp, NO_NVRAM))
16907                 tp->fw_needed = NULL;
16908
16909         grc_misc_cfg = tr32(GRC_MISC_CFG);
16910         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16911
16912         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16913             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16914              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16915                 tg3_flag_set(tp, IS_5788);
16916
16917         if (!tg3_flag(tp, IS_5788) &&
16918             tg3_asic_rev(tp) != ASIC_REV_5700)
16919                 tg3_flag_set(tp, TAGGED_STATUS);
16920         if (tg3_flag(tp, TAGGED_STATUS)) {
16921                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16922                                       HOSTCC_MODE_CLRTICK_TXBD);
16923
16924                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16925                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16926                                        tp->misc_host_ctrl);
16927         }
16928
16929         /* Preserve the APE MAC_MODE bits */
16930         if (tg3_flag(tp, ENABLE_APE))
16931                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16932         else
16933                 tp->mac_mode = 0;
16934
16935         if (tg3_10_100_only_device(tp, ent))
16936                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16937
16938         err = tg3_phy_probe(tp);
16939         if (err) {
16940                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16941                 /* ... but do not return immediately ... */
16942                 tg3_mdio_fini(tp);
16943         }
16944
16945         tg3_read_vpd(tp);
16946         tg3_read_fw_ver(tp);
16947
16948         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16949                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16950         } else {
16951                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16952                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16953                 else
16954                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16955         }
16956
16957         /* 5700 {AX,BX} chips have a broken status block link
16958          * change bit implementation, so we must use the
16959          * status register in those cases.
16960          */
16961         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16962                 tg3_flag_set(tp, USE_LINKCHG_REG);
16963         else
16964                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16965
16966         /* The led_ctrl is set during tg3_phy_probe, here we might
16967          * have to force the link status polling mechanism based
16968          * upon subsystem IDs.
16969          */
16970         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16971             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16972             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16973                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16974                 tg3_flag_set(tp, USE_LINKCHG_REG);
16975         }
16976
16977         /* For all SERDES we poll the MAC status register. */
16978         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16979                 tg3_flag_set(tp, POLL_SERDES);
16980         else
16981                 tg3_flag_clear(tp, POLL_SERDES);
16982
16983         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16984                 tg3_flag_set(tp, POLL_CPMU_LINK);
16985
16986         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16987         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16988         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16989             tg3_flag(tp, PCIX_MODE)) {
16990                 tp->rx_offset = NET_SKB_PAD;
16991 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16992                 tp->rx_copy_thresh = ~(u16)0;
16993 #endif
16994         }
16995
16996         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16997         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16998         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16999
17000         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17001
17002         /* Increment the rx prod index on the rx std ring by at most
17003          * 8 for these chips to workaround hw errata.
17004          */
17005         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17006             tg3_asic_rev(tp) == ASIC_REV_5752 ||
17007             tg3_asic_rev(tp) == ASIC_REV_5755)
17008                 tp->rx_std_max_post = 8;
17009
17010         if (tg3_flag(tp, ASPM_WORKAROUND))
17011                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17012                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
17013
17014         return err;
17015 }
17016
17017 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17018 {
17019         u32 hi, lo, mac_offset;
17020         int addr_ok = 0;
17021         int err;
17022
17023         if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17024                 return 0;
17025
17026         if (tg3_flag(tp, IS_SSB_CORE)) {
17027                 err = ssb_gige_get_macaddr(tp->pdev, addr);
17028                 if (!err && is_valid_ether_addr(addr))
17029                         return 0;
17030         }
17031
17032         mac_offset = 0x7c;
17033         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17034             tg3_flag(tp, 5780_CLASS)) {
17035                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17036                         mac_offset = 0xcc;
17037                 if (tg3_nvram_lock(tp))
17038                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17039                 else
17040                         tg3_nvram_unlock(tp);
17041         } else if (tg3_flag(tp, 5717_PLUS)) {
17042                 if (tp->pci_fn & 1)
17043                         mac_offset = 0xcc;
17044                 if (tp->pci_fn > 1)
17045                         mac_offset += 0x18c;
17046         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17047                 mac_offset = 0x10;
17048
17049         /* First try to get it from MAC address mailbox. */
17050         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17051         if ((hi >> 16) == 0x484b) {
17052                 addr[0] = (hi >>  8) & 0xff;
17053                 addr[1] = (hi >>  0) & 0xff;
17054
17055                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17056                 addr[2] = (lo >> 24) & 0xff;
17057                 addr[3] = (lo >> 16) & 0xff;
17058                 addr[4] = (lo >>  8) & 0xff;
17059                 addr[5] = (lo >>  0) & 0xff;
17060
17061                 /* Some old bootcode may report a 0 MAC address in SRAM */
17062                 addr_ok = is_valid_ether_addr(addr);
17063         }
17064         if (!addr_ok) {
17065                 /* Next, try NVRAM. */
17066                 if (!tg3_flag(tp, NO_NVRAM) &&
17067                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17068                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17069                         memcpy(&addr[0], ((char *)&hi) + 2, 2);
17070                         memcpy(&addr[2], (char *)&lo, sizeof(lo));
17071                 }
17072                 /* Finally just fetch it out of the MAC control regs. */
17073                 else {
17074                         hi = tr32(MAC_ADDR_0_HIGH);
17075                         lo = tr32(MAC_ADDR_0_LOW);
17076
17077                         addr[5] = lo & 0xff;
17078                         addr[4] = (lo >> 8) & 0xff;
17079                         addr[3] = (lo >> 16) & 0xff;
17080                         addr[2] = (lo >> 24) & 0xff;
17081                         addr[1] = hi & 0xff;
17082                         addr[0] = (hi >> 8) & 0xff;
17083                 }
17084         }
17085
17086         if (!is_valid_ether_addr(addr))
17087                 return -EINVAL;
17088         return 0;
17089 }
17090
/* "goal" values used by tg3_calc_dma_bndry() below to pick how the DMA
 * engine should break up bursts relative to the host cache line size.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
17093
/* tg3_calc_dma_bndry - merge DMA read/write boundary bits into @val.
 *
 * Some PCI host bridges disconnect when a device bursts across a host
 * cache-line boundary, so on chips where the boundary bits matter we
 * derive a boundary setting from the PCI cache line size and the
 * architecture (see the #ifdef ladder below).
 *
 * Returns @val with the appropriate DMA_RWCTRL_*_BNDRY_* bits merged
 * in; @val is returned unchanged when the chip ignores these bits.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the *4
	 * to get bytes.  A value of 0 (never programmed by firmware) is
	 * treated as a large 1024-byte line.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture choice of boundary behavior. */
#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ chips only have a cache-alignment disable bit.  Note
	 * this assignment *replaces* @val rather than OR-ing into it.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only has write-side boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			fallthrough;
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: fall through the cases until the boundary is
		 * at least one cache line (for BOUNDARY_SINGLE_CACHELINE),
		 * otherwise land on the 256+ entries.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			fallthrough;
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			fallthrough;
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			fallthrough;
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			fallthrough;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
17234
/* tg3_do_test_dma - run one self-test DMA transfer between host memory
 * and the NIC.
 * @buf:       host-side test buffer (coherent mapping)
 * @buf_dma:   DMA address of @buf
 * @size:      transfer length in bytes
 * @to_device: true  = read DMA (host memory -> NIC),
 *             false = write DMA (NIC -> host memory)
 *
 * Builds an internal buffer descriptor in NIC SRAM through the PCI
 * memory-window config registers, enqueues it on the appropriate DMA
 * FIFO, and polls the matching completion FIFO.
 *
 * Returns 0 once the completion shows up (polled 40 times at 100 us
 * intervals, ~4 ms total), -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	/* NOTE(review): 0x00002100 looks like a fixed NIC-internal mbuf
	 * address used only by this self-test — not derived from SOURCE.
	 */
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Presumably completion/submission queue ids for the read
		 * DMA engine — TODO confirm against chip documentation.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17315
/* Size of the coherent buffer used by the tg3_test_dma() self-test. */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the loop-back test in tg3_test_dma() passes; the driver forces the
 * 16-byte write boundary when one of these is present.
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
17322
/* tg3_test_dma - derive and program a safe TG3PCI_DMA_RW_CTRL value.
 *
 * Builds tp->dma_rwctrl from the bus type (PCIe / PCI-X / plain PCI)
 * and chip revision, then on 5700/5701 runs a write/read DMA loop-back
 * against a coherent test buffer to detect the write-DMA bug those
 * chips have at large write burst sizes; on corruption the write
 * boundary is tightened to 16 bytes and the test retried.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on buffer
 * allocation failure, -ENODEV on DMA failure/corruption).
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command values for DMA_RW_CTRL. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-specific watermark tuning.  The bare hex constants below
	 * are chip-specific watermark encodings — carried over from the
	 * vendor driver, not derivable from this file.
	 */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* Clear the low nibble (PCI read command bits) on 5703/5704. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Only 5700/5701 need the loop-back DMA test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write the pattern to the chip, read it back, and verify.
	 * On corruption, fall back to a 16-byte write boundary once and
	 * retry; a second corruption is a hard failure.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17496
17497 static void tg3_init_bufmgr_config(struct tg3 *tp)
17498 {
17499         if (tg3_flag(tp, 57765_PLUS)) {
17500                 tp->bufmgr_config.mbuf_read_dma_low_water =
17501                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17502                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17503                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17504                 tp->bufmgr_config.mbuf_high_water =
17505                         DEFAULT_MB_HIGH_WATER_57765;
17506
17507                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17508                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17509                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17510                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17511                 tp->bufmgr_config.mbuf_high_water_jumbo =
17512                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17513         } else if (tg3_flag(tp, 5705_PLUS)) {
17514                 tp->bufmgr_config.mbuf_read_dma_low_water =
17515                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17516                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17517                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17518                 tp->bufmgr_config.mbuf_high_water =
17519                         DEFAULT_MB_HIGH_WATER_5705;
17520                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17521                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17522                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17523                         tp->bufmgr_config.mbuf_high_water =
17524                                 DEFAULT_MB_HIGH_WATER_5906;
17525                 }
17526
17527                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17528                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17529                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17530                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17531                 tp->bufmgr_config.mbuf_high_water_jumbo =
17532                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17533         } else {
17534                 tp->bufmgr_config.mbuf_read_dma_low_water =
17535                         DEFAULT_MB_RDMA_LOW_WATER;
17536                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17537                         DEFAULT_MB_MACRX_LOW_WATER;
17538                 tp->bufmgr_config.mbuf_high_water =
17539                         DEFAULT_MB_HIGH_WATER;
17540
17541                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17542                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17543                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17544                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17545                 tp->bufmgr_config.mbuf_high_water_jumbo =
17546                         DEFAULT_MB_HIGH_WATER_JUMBO;
17547         }
17548
17549         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17550         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17551 }
17552
17553 static char *tg3_phy_string(struct tg3 *tp)
17554 {
17555         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17556         case TG3_PHY_ID_BCM5400:        return "5400";
17557         case TG3_PHY_ID_BCM5401:        return "5401";
17558         case TG3_PHY_ID_BCM5411:        return "5411";
17559         case TG3_PHY_ID_BCM5701:        return "5701";
17560         case TG3_PHY_ID_BCM5703:        return "5703";
17561         case TG3_PHY_ID_BCM5704:        return "5704";
17562         case TG3_PHY_ID_BCM5705:        return "5705";
17563         case TG3_PHY_ID_BCM5750:        return "5750";
17564         case TG3_PHY_ID_BCM5752:        return "5752";
17565         case TG3_PHY_ID_BCM5714:        return "5714";
17566         case TG3_PHY_ID_BCM5780:        return "5780";
17567         case TG3_PHY_ID_BCM5755:        return "5755";
17568         case TG3_PHY_ID_BCM5787:        return "5787";
17569         case TG3_PHY_ID_BCM5784:        return "5784";
17570         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17571         case TG3_PHY_ID_BCM5906:        return "5906";
17572         case TG3_PHY_ID_BCM5761:        return "5761";
17573         case TG3_PHY_ID_BCM5718C:       return "5718C";
17574         case TG3_PHY_ID_BCM5718S:       return "5718S";
17575         case TG3_PHY_ID_BCM57765:       return "57765";
17576         case TG3_PHY_ID_BCM5719C:       return "5719C";
17577         case TG3_PHY_ID_BCM5720C:       return "5720C";
17578         case TG3_PHY_ID_BCM5762:        return "5762C";
17579         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17580         case 0:                 return "serdes";
17581         default:                return "unknown";
17582         }
17583 }
17584
17585 static char *tg3_bus_string(struct tg3 *tp, char *str)
17586 {
17587         if (tg3_flag(tp, PCI_EXPRESS)) {
17588                 strcpy(str, "PCI Express");
17589                 return str;
17590         } else if (tg3_flag(tp, PCIX_MODE)) {
17591                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17592
17593                 strcpy(str, "PCIX:");
17594
17595                 if ((clock_ctrl == 7) ||
17596                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17597                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17598                         strcat(str, "133MHz");
17599                 else if (clock_ctrl == 0)
17600                         strcat(str, "33MHz");
17601                 else if (clock_ctrl == 2)
17602                         strcat(str, "50MHz");
17603                 else if (clock_ctrl == 4)
17604                         strcat(str, "66MHz");
17605                 else if (clock_ctrl == 6)
17606                         strcat(str, "100MHz");
17607         } else {
17608                 strcpy(str, "PCI:");
17609                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17610                         strcat(str, "66MHz");
17611                 else
17612                         strcat(str, "33MHz");
17613         }
17614         if (tg3_flag(tp, PCI_32BIT))
17615                 strcat(str, ":32-bit");
17616         else
17617                 strcat(str, ":64-bit");
17618         return str;
17619 }
17620
17621 static void tg3_init_coal(struct tg3 *tp)
17622 {
17623         struct ethtool_coalesce *ec = &tp->coal;
17624
17625         memset(ec, 0, sizeof(*ec));
17626         ec->cmd = ETHTOOL_GCOALESCE;
17627         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17628         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17629         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17630         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17631         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17632         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17633         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17634         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17635         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17636
17637         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17638                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17639                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17640                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17641                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17642                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17643         }
17644
17645         if (tg3_flag(tp, 5705_PLUS)) {
17646                 ec->rx_coalesce_usecs_irq = 0;
17647                 ec->tx_coalesce_usecs_irq = 0;
17648                 ec->stats_block_coalesce_usecs = 0;
17649         }
17650 }
17651
17652 static int tg3_init_one(struct pci_dev *pdev,
17653                                   const struct pci_device_id *ent)
17654 {
17655         struct net_device *dev;
17656         struct tg3 *tp;
17657         int i, err;
17658         u32 sndmbx, rcvmbx, intmbx;
17659         char str[40];
17660         u64 dma_mask, persist_dma_mask;
17661         netdev_features_t features = 0;
17662         u8 addr[ETH_ALEN] __aligned(2);
17663
17664         err = pci_enable_device(pdev);
17665         if (err) {
17666                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17667                 return err;
17668         }
17669
17670         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17671         if (err) {
17672                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17673                 goto err_out_disable_pdev;
17674         }
17675
17676         pci_set_master(pdev);
17677
17678         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17679         if (!dev) {
17680                 err = -ENOMEM;
17681                 goto err_out_free_res;
17682         }
17683
17684         SET_NETDEV_DEV(dev, &pdev->dev);
17685
17686         tp = netdev_priv(dev);
17687         tp->pdev = pdev;
17688         tp->dev = dev;
17689         tp->rx_mode = TG3_DEF_RX_MODE;
17690         tp->tx_mode = TG3_DEF_TX_MODE;
17691         tp->irq_sync = 1;
17692         tp->pcierr_recovery = false;
17693
17694         if (tg3_debug > 0)
17695                 tp->msg_enable = tg3_debug;
17696         else
17697                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17698
17699         if (pdev_is_ssb_gige_core(pdev)) {
17700                 tg3_flag_set(tp, IS_SSB_CORE);
17701                 if (ssb_gige_must_flush_posted_writes(pdev))
17702                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17703                 if (ssb_gige_one_dma_at_once(pdev))
17704                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17705                 if (ssb_gige_have_roboswitch(pdev)) {
17706                         tg3_flag_set(tp, USE_PHYLIB);
17707                         tg3_flag_set(tp, ROBOSWITCH);
17708                 }
17709                 if (ssb_gige_is_rgmii(pdev))
17710                         tg3_flag_set(tp, RGMII_MODE);
17711         }
17712
17713         /* The word/byte swap controls here control register access byte
17714          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17715          * setting below.
17716          */
17717         tp->misc_host_ctrl =
17718                 MISC_HOST_CTRL_MASK_PCI_INT |
17719                 MISC_HOST_CTRL_WORD_SWAP |
17720                 MISC_HOST_CTRL_INDIR_ACCESS |
17721                 MISC_HOST_CTRL_PCISTATE_RW;
17722
17723         /* The NONFRM (non-frame) byte/word swap controls take effect
17724          * on descriptor entries, anything which isn't packet data.
17725          *
17726          * The StrongARM chips on the board (one for tx, one for rx)
17727          * are running in big-endian mode.
17728          */
17729         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17730                         GRC_MODE_WSWAP_NONFRM_DATA);
17731 #ifdef __BIG_ENDIAN
17732         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17733 #endif
17734         spin_lock_init(&tp->lock);
17735         spin_lock_init(&tp->indirect_lock);
17736         INIT_WORK(&tp->reset_task, tg3_reset_task);
17737
17738         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17739         if (!tp->regs) {
17740                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17741                 err = -ENOMEM;
17742                 goto err_out_free_dev;
17743         }
17744
17745         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17746             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17747             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17748             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17749             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17750             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17751             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17752             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17753             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17754             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17755             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17756             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17757             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17758             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17759             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17760                 tg3_flag_set(tp, ENABLE_APE);
17761                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17762                 if (!tp->aperegs) {
17763                         dev_err(&pdev->dev,
17764                                 "Cannot map APE registers, aborting\n");
17765                         err = -ENOMEM;
17766                         goto err_out_iounmap;
17767                 }
17768         }
17769
17770         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17771         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17772
17773         dev->ethtool_ops = &tg3_ethtool_ops;
17774         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17775         dev->netdev_ops = &tg3_netdev_ops;
17776         dev->irq = pdev->irq;
17777
17778         err = tg3_get_invariants(tp, ent);
17779         if (err) {
17780                 dev_err(&pdev->dev,
17781                         "Problem fetching invariants of chip, aborting\n");
17782                 goto err_out_apeunmap;
17783         }
17784
17785         /* The EPB bridge inside 5714, 5715, and 5780 and any
17786          * device behind the EPB cannot support DMA addresses > 40-bit.
17787          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17788          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17789          * do DMA address check in __tg3_start_xmit().
17790          */
17791         if (tg3_flag(tp, IS_5788))
17792                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17793         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17794                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17795 #ifdef CONFIG_HIGHMEM
17796                 dma_mask = DMA_BIT_MASK(64);
17797 #endif
17798         } else
17799                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17800
17801         /* Configure DMA attributes. */
17802         if (dma_mask > DMA_BIT_MASK(32)) {
17803                 err = dma_set_mask(&pdev->dev, dma_mask);
17804                 if (!err) {
17805                         features |= NETIF_F_HIGHDMA;
17806                         err = dma_set_coherent_mask(&pdev->dev,
17807                                                     persist_dma_mask);
17808                         if (err < 0) {
17809                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17810                                         "DMA for consistent allocations\n");
17811                                 goto err_out_apeunmap;
17812                         }
17813                 }
17814         }
17815         if (err || dma_mask == DMA_BIT_MASK(32)) {
17816                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17817                 if (err) {
17818                         dev_err(&pdev->dev,
17819                                 "No usable DMA configuration, aborting\n");
17820                         goto err_out_apeunmap;
17821                 }
17822         }
17823
17824         tg3_init_bufmgr_config(tp);
17825
17826         /* 5700 B0 chips do not support checksumming correctly due
17827          * to hardware bugs.
17828          */
17829         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17830                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17831
17832                 if (tg3_flag(tp, 5755_PLUS))
17833                         features |= NETIF_F_IPV6_CSUM;
17834         }
17835
17836         /* TSO is on by default on chips that support hardware TSO.
17837          * Firmware TSO on older chips gives lower performance, so it
17838          * is off by default, but can be enabled using ethtool.
17839          */
17840         if ((tg3_flag(tp, HW_TSO_1) ||
17841              tg3_flag(tp, HW_TSO_2) ||
17842              tg3_flag(tp, HW_TSO_3)) &&
17843             (features & NETIF_F_IP_CSUM))
17844                 features |= NETIF_F_TSO;
17845         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17846                 if (features & NETIF_F_IPV6_CSUM)
17847                         features |= NETIF_F_TSO6;
17848                 if (tg3_flag(tp, HW_TSO_3) ||
17849                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17850                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17851                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17852                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17853                     tg3_asic_rev(tp) == ASIC_REV_57780)
17854                         features |= NETIF_F_TSO_ECN;
17855         }
17856
17857         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17858                          NETIF_F_HW_VLAN_CTAG_RX;
17859         dev->vlan_features |= features;
17860
17861         /*
17862          * Add loopback capability only for a subset of devices that support
17863          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17864          * loopback for the remaining devices.
17865          */
17866         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17867             !tg3_flag(tp, CPMU_PRESENT))
17868                 /* Add the loopback capability */
17869                 features |= NETIF_F_LOOPBACK;
17870
17871         dev->hw_features |= features;
17872         dev->priv_flags |= IFF_UNICAST_FLT;
17873
17874         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17875         dev->min_mtu = TG3_MIN_MTU;
17876         dev->max_mtu = TG3_MAX_MTU(tp);
17877
17878         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17879             !tg3_flag(tp, TSO_CAPABLE) &&
17880             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17881                 tg3_flag_set(tp, MAX_RXPEND_64);
17882                 tp->rx_pending = 63;
17883         }
17884
17885         err = tg3_get_device_address(tp, addr);
17886         if (err) {
17887                 dev_err(&pdev->dev,
17888                         "Could not obtain valid ethernet address, aborting\n");
17889                 goto err_out_apeunmap;
17890         }
17891         eth_hw_addr_set(dev, addr);
17892
17893         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17894         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17895         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17896         for (i = 0; i < tp->irq_max; i++) {
17897                 struct tg3_napi *tnapi = &tp->napi[i];
17898
17899                 tnapi->tp = tp;
17900                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17901
17902                 tnapi->int_mbox = intmbx;
17903                 intmbx += 0x8;
17904
17905                 tnapi->consmbox = rcvmbx;
17906                 tnapi->prodmbox = sndmbx;
17907
17908                 if (i)
17909                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17910                 else
17911                         tnapi->coal_now = HOSTCC_MODE_NOW;
17912
17913                 if (!tg3_flag(tp, SUPPORT_MSIX))
17914                         break;
17915
17916                 /*
17917                  * If we support MSIX, we'll be using RSS.  If we're using
17918                  * RSS, the first vector only handles link interrupts and the
17919                  * remaining vectors handle rx and tx interrupts.  Reuse the
17920                  * mailbox values for the next iteration.  The values we setup
17921                  * above are still useful for the single vectored mode.
17922                  */
17923                 if (!i)
17924                         continue;
17925
17926                 rcvmbx += 0x8;
17927
17928                 if (sndmbx & 0x4)
17929                         sndmbx -= 0x4;
17930                 else
17931                         sndmbx += 0xc;
17932         }
17933
17934         /*
17935          * Reset chip in case UNDI or EFI driver did not shutdown
17936          * DMA self test will enable WDMAC and we'll see (spurious)
17937          * pending DMA on the PCI bus at that point.
17938          */
17939         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17940             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17941                 tg3_full_lock(tp, 0);
17942                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17943                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17944                 tg3_full_unlock(tp);
17945         }
17946
17947         err = tg3_test_dma(tp);
17948         if (err) {
17949                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17950                 goto err_out_apeunmap;
17951         }
17952
17953         tg3_init_coal(tp);
17954
17955         pci_set_drvdata(pdev, dev);
17956
17957         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17958             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17959             tg3_asic_rev(tp) == ASIC_REV_5762)
17960                 tg3_flag_set(tp, PTP_CAPABLE);
17961
17962         tg3_timer_init(tp);
17963
17964         tg3_carrier_off(tp);
17965
17966         err = register_netdev(dev);
17967         if (err) {
17968                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17969                 goto err_out_apeunmap;
17970         }
17971
17972         if (tg3_flag(tp, PTP_CAPABLE)) {
17973                 tg3_ptp_init(tp);
17974                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17975                                                    &tp->pdev->dev);
17976                 if (IS_ERR(tp->ptp_clock))
17977                         tp->ptp_clock = NULL;
17978         }
17979
17980         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17981                     tp->board_part_number,
17982                     tg3_chip_rev_id(tp),
17983                     tg3_bus_string(tp, str),
17984                     dev->dev_addr);
17985
17986         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17987                 char *ethtype;
17988
17989                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17990                         ethtype = "10/100Base-TX";
17991                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17992                         ethtype = "1000Base-SX";
17993                 else
17994                         ethtype = "10/100/1000Base-T";
17995
17996                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17997                             "(WireSpeed[%d], EEE[%d])\n",
17998                             tg3_phy_string(tp), ethtype,
17999                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18000                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18001         }
18002
18003         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18004                     (dev->features & NETIF_F_RXCSUM) != 0,
18005                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
18006                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18007                     tg3_flag(tp, ENABLE_ASF) != 0,
18008                     tg3_flag(tp, TSO_CAPABLE) != 0);
18009         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18010                     tp->dma_rwctrl,
18011                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18012                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18013
18014         pci_save_state(pdev);
18015
18016         return 0;
18017
18018 err_out_apeunmap:
18019         if (tp->aperegs) {
18020                 iounmap(tp->aperegs);
18021                 tp->aperegs = NULL;
18022         }
18023
18024 err_out_iounmap:
18025         if (tp->regs) {
18026                 iounmap(tp->regs);
18027                 tp->regs = NULL;
18028         }
18029
18030 err_out_free_dev:
18031         free_netdev(dev);
18032
18033 err_out_free_res:
18034         pci_release_regions(pdev);
18035
18036 err_out_disable_pdev:
18037         if (pci_is_enabled(pdev))
18038                 pci_disable_device(pdev);
18039         return err;
18040 }
18041
18042 static void tg3_remove_one(struct pci_dev *pdev)
18043 {
18044         struct net_device *dev = pci_get_drvdata(pdev);
18045
18046         if (dev) {
18047                 struct tg3 *tp = netdev_priv(dev);
18048
18049                 tg3_ptp_fini(tp);
18050
18051                 release_firmware(tp->fw);
18052
18053                 tg3_reset_task_cancel(tp);
18054
18055                 if (tg3_flag(tp, USE_PHYLIB)) {
18056                         tg3_phy_fini(tp);
18057                         tg3_mdio_fini(tp);
18058                 }
18059
18060                 unregister_netdev(dev);
18061                 if (tp->aperegs) {
18062                         iounmap(tp->aperegs);
18063                         tp->aperegs = NULL;
18064                 }
18065                 if (tp->regs) {
18066                         iounmap(tp->regs);
18067                         tp->regs = NULL;
18068                 }
18069                 free_netdev(dev);
18070                 pci_release_regions(pdev);
18071                 pci_disable_device(pdev);
18072         }
18073 }
18074
18075 #ifdef CONFIG_PM_SLEEP
/* System-suspend (PM sleep) hook.
 *
 * If the interface is up: cancel the reset task, stop the PHY, data
 * path and timer, mask interrupts, detach the netdev, halt the chip,
 * then let tg3_power_down_prepare() set up the low-power/WoL state.
 * If power-down preparation fails, the hardware is restarted so the
 * interface remains usable.
 *
 * Returns 0 on success or the negative errno from
 * tg3_power_down_prepare().
 */
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	/* rtnl serializes against ifup/ifdown and our other callbacks */
	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	/* Want to make sure the reset task doesn't run while we halt */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down prep failed: bring the hardware back up so
		 * the device keeps working instead of suspending broken.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside tp->lock */
		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
18131
/* System-resume (PM sleep) hook: reattach the netdev, reinitialize
 * the hardware and restart the timer and data path, but only if the
 * interface was running when we suspended.
 *
 * Returns 0 on success or the negative errno from tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Tell the APE firmware the driver is (re)initializing */
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* Skip the PHY reset if the link was kept up across power-down */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside tp->lock */
	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
18169 #endif /* CONFIG_PM_SLEEP */
18170
/* PM callbacks; SIMPLE_DEV_PM_OPS only wires up tg3_suspend/tg3_resume
 * when CONFIG_PM_SLEEP is enabled, matching the #ifdef above.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18172
/* PCI shutdown callback: quiesce the device ahead of reboot or
 * power-off.  The chip is powered down (WoL armed) only on a real
 * power-off, not on reboot.
 */
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	/* Make sure no reset work runs while we shut down */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	/* Arm WoL / low-power state only when actually powering off */
	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}
18194
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Return: PCI_ERS_RESULT_DISCONNECT on a permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be second call or maybe we don't have netdev yet */
	/* NOTE(review): netdev was already passed to netdev_info() above,
	 * so the !netdev test here only helps if netdev_info() tolerates
	 * NULL - confirm whether this check is still meaningful.
	 */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* We needn't recover from permanent error */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		/* Permanent failure: no reset will help, close the device */
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
18253
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 *
 * Return: PCI_ERS_RESULT_RECOVERED when the device is usable again,
 * PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space saved at probe/recovery time */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* If the interface was not running, re-enabling PCI is enough */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	/* Recovery failed while the interface was up: close it for good */
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
18302
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  Restarts the
 * hardware and data path, then clears the pcierr_recovery flag so
 * later error-detected calls are processed again.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	/* Tell the APE firmware the driver is (re)initializing */
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart must happen outside tp->lock */
	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
18345
/* PCI error recovery callbacks (AER): detect -> slot reset -> resume */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18351
/* PCI driver definition tying together probe/remove, power management,
 * shutdown and error-recovery entry points for all supported Tigon3
 * devices listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};
18361
/* Generates module init/exit that register/unregister tg3_driver */
module_pci_driver(tg3_driver);