GNU Linux-libre 4.4.289-gnu1
[releases.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2014 Broadcom Corporation.
8  *
9 /*(DEBLOBBED)*/
10
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/stringify.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/compiler.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 #include <linux/in.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/pci.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/ethtool.h>
28 #include <linux/mdio.h>
29 #include <linux/mii.h>
30 #include <linux/phy.h>
31 #include <linux/brcmphy.h>
32 #include <linux/if.h>
33 #include <linux/if_vlan.h>
34 #include <linux/ip.h>
35 #include <linux/tcp.h>
36 #include <linux/workqueue.h>
37 #include <linux/prefetch.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/firmware.h>
40 #include <linux/ssb/ssb_driver_gige.h>
41 #include <linux/hwmon.h>
42 #include <linux/hwmon-sysfs.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <linux/io.h>
48 #include <asm/byteorder.h>
49 #include <linux/uaccess.h>
50
51 #include <uapi/linux/net_tstamp.h>
52 #include <linux/ptp_clock_kernel.h>
53
54 #ifdef CONFIG_SPARC
55 #include <asm/idprom.h>
56 #include <asm/prom.h>
57 #endif
58
59 #define BAR_0   0
60 #define BAR_2   2
61
62 #include "tg3.h"
63
64 /* Functions & macros to verify TG3_FLAGS types */
65
66 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
67 {
68         return test_bit(flag, bits);
69 }
70
71 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
72 {
73         set_bit(flag, bits);
74 }
75
76 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78         clear_bit(flag, bits);
79 }
80
81 #define tg3_flag(tp, flag)                              \
82         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
83 #define tg3_flag_set(tp, flag)                          \
84         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_clear(tp, flag)                        \
86         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
87
88 #define DRV_MODULE_NAME         "tg3"
89 #define TG3_MAJ_NUM                     3
90 #define TG3_MIN_NUM                     137
91 #define DRV_MODULE_VERSION      \
92         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
93 #define DRV_MODULE_RELDATE      "May 11, 2014"
94
95 #define RESET_KIND_SHUTDOWN     0
96 #define RESET_KIND_INIT         1
97 #define RESET_KIND_SUSPEND      2
98
99 #define TG3_DEF_RX_MODE         0
100 #define TG3_DEF_TX_MODE         0
101 #define TG3_DEF_MSG_ENABLE        \
102         (NETIF_MSG_DRV          | \
103          NETIF_MSG_PROBE        | \
104          NETIF_MSG_LINK         | \
105          NETIF_MSG_TIMER        | \
106          NETIF_MSG_IFDOWN       | \
107          NETIF_MSG_IFUP         | \
108          NETIF_MSG_RX_ERR       | \
109          NETIF_MSG_TX_ERR)
110
111 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
112
113 /* length of time before we decide the hardware is borked,
114  * and dev->tx_timeout() should be called to fix the problem
115  */
116
117 #define TG3_TX_TIMEOUT                  (5 * HZ)
118
119 /* hardware minimum and maximum for a single frame's data payload */
120 #define TG3_MIN_MTU                     60
121 #define TG3_MAX_MTU(tp) \
122         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
123
124 /* These numbers seem to be hard coded in the NIC firmware somehow.
125  * You can't change the ring sizes, but you can change where you place
126  * them in the NIC onboard memory.
127  */
128 #define TG3_RX_STD_RING_SIZE(tp) \
129         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
131 #define TG3_DEF_RX_RING_PENDING         200
132 #define TG3_RX_JMB_RING_SIZE(tp) \
133         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
134          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
135 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
136
137 /* Do not place this n-ring entries value into the tp struct itself,
138  * we really want to expose these constants to GCC so that modulo et
139  * al.  operations are done with shifts and masks instead of with
140  * hw multiply/modulo instructions.  Another solution would be to
141  * replace things like '% foo' with '& (foo - 1)'.
142  */
143
144 #define TG3_TX_RING_SIZE                512
145 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
154                                  TG3_TX_RING_SIZE)
155 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156
157 #define TG3_DMA_BYTE_ENAB               64
158
159 #define TG3_RX_STD_DMA_SZ               1536
160 #define TG3_RX_JMB_DMA_SZ               9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174  * that are at least dword aligned when used in PCIX mode.  The driver
175  * works around this bug by double copying the packet.  This workaround
176  * is built into the normal double copy length check for efficiency.
177  *
178  * However, the double copy is only necessary on those architectures
179  * where unaligned memory accesses are inefficient.  For those architectures
180  * where unaligned memory accesses incur little penalty, we can reintegrate
181  * the 5701 in the normal rx path.  Doing so saves a device structure
182  * dereference by hardcoding the double copy threshold in place.
183  */
184 #define TG3_RX_COPY_THRESHOLD           256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
187 #else
188         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
189 #endif
190
191 #if (NET_IP_ALIGN != 0)
192 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
193 #else
194 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
195 #endif
196
197 /* minimum number of free TX descriptors required to wake up TX process */
198 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
199 #define TG3_TX_BD_DMA_MAX_2K            2048
200 #define TG3_TX_BD_DMA_MAX_4K            4096
201
202 #define TG3_RAW_IP_ALIGN 2
203
204 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
205 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
206
207 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
208 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
209
210 #define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
211 #define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
212 #define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
213 #define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"
214
/* Driver identification string, e.g. "tg3.c:v3.137 (May 11, 2014)". */
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
217
218 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
219 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
220 MODULE_LICENSE("GPL");
221 MODULE_VERSION(DRV_MODULE_VERSION);
222 /*(DEBLOBBED)*/
223
/* Module parameter: bitmap of NETIF_MSG_* message categories to enable. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227
228 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
229 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
230
/* PCI IDs of every device this driver claims.  A few OEM boards are
 * matched on subsystem IDs as well; .driver_data carries the
 * TG3_DRV_DATA_FLAG_* bits for 10/100-only parts.
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
349
350 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
351
/* ethtool -S statistic name strings.  The order here is significant:
 * NOTE(review): it presumably mirrors the order in which the stats
 * values are reported elsewhere in this driver — do not reorder.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
434
435 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
436 #define TG3_NVRAM_TEST          0
437 #define TG3_LINK_TEST           1
438 #define TG3_REGISTER_TEST       2
439 #define TG3_MEMORY_TEST         3
440 #define TG3_MAC_LOOPB_TEST      4
441 #define TG3_PHY_LOOPB_TEST      5
442 #define TG3_EXT_LOOPB_TEST      6
443 #define TG3_INTERRUPT_TEST      7
444
445
/* ethtool self-test name strings, indexed by the TG3_*_TEST constants
 * defined above (designated initializers keep the mapping explicit).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
458
459 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
460
461
462 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
463 {
464         writel(val, tp->regs + off);
465 }
466
467 static u32 tg3_read32(struct tg3 *tp, u32 off)
468 {
469         return readl(tp->regs + off);
470 }
471
472 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
473 {
474         writel(val, tp->aperegs + off);
475 }
476
477 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
478 {
479         return readl(tp->aperegs + off);
480 }
481
/* Write register @off indirectly through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA).  indirect_lock serializes
 * users of the shared base-address/data register pair.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
491
492 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
493 {
494         writel(val, tp->regs + off);
495         readl(tp->regs + off);
496 }
497
/* Read register @off indirectly through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA), under indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
509
/* Indirect-mode mailbox write.  Two mailboxes have dedicated PCI
 * config-space aliases and are written lock-free; everything else goes
 * through the shared REG_BASE_ADDR/REG_DATA window at @off + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return-ring consumer index has its own config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* So does the standard RX producer index. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
539
/* Indirect-mode mailbox read through the REG_BASE_ADDR/REG_DATA window
 * at @off + 0x5600, under indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
551
552 /* usec_wait specifies the wait time in usec when writing to certain registers
553  * where it is unsafe to read back the register without some delay.
554  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
555  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
556  */
/* Write @val to @off, enforcing @usec_wait of settle time (see the
 * comment block above for which registers need this).
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read back to flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
575
/* Write mailbox @off, then read it back to flush the write when
 * FLUSH_POSTED_WRITES is set, or when neither the MBOX_WRITE_REORDER
 * nor the ICH workaround applies.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
584
/* Write a TX mailbox register.  Chips with TXD_MBOX_HWBUG get the value
 * written twice; chips that post or reorder writes get a read-back to
 * flush the mailbox write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
595
596 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
597 {
598         return readl(tp->regs + off + GRCMBOX_BASE);
599 }
600
601 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
602 {
603         writel(val, tp->regs + off + GRCMBOX_BASE);
604 }
605
606 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
607 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
608 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
609 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
610 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
611
612 #define tw32(reg, val)                  tp->write32(tp, reg, val)
613 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
614 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
615 #define tr32(reg)                       tp->read32(tp, reg)
616
/* Write @val into NIC SRAM at offset @off through the memory window.
 *
 * On the 5906, offsets in [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are silently skipped.  The window base/data pair is shared, so
 * indirect_lock serializes access, and the window base is restored to
 * zero before the lock is dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window accessed via PCI config space on these chips. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window accessed via MMIO with flushed writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
641
/* Read NIC SRAM at offset @off into *@val through the memory window.
 *
 * On the 5906, offsets in [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are not read; *@val is set to 0 instead.  Mirrors tg3_write_mem():
 * indirect_lock serializes window use and the base is left at zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window accessed via PCI config space on these chips. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window accessed via MMIO with flushed writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
668
/* Release every APE lock grant bit this PCI function could own, so no
 * stale locks from a previous driver instance survive.  PHY locks use
 * the driver grant bit; other locks use a per-function bit except on
 * function 0.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	/* 5761 uses the legacy (non-per-function) grant registers. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		/* Grant registers are 4 bytes apart, one per lock. */
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
698
/* Acquire APE hardware lock @locknum for this PCI function.
 *
 * Returns 0 on success (or when APE is not enabled / the lock does not
 * exist on this chip), -EINVAL for an unknown lock number, or -EBUSY
 * if the lock was not granted within ~1 ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to acquire. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Functions other than 0 request with a per-function bit. */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant register blocks. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		/* Give up early if the device dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
760
761 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
762 {
763         u32 gnt, bit;
764
765         if (!tg3_flag(tp, ENABLE_APE))
766                 return;
767
768         switch (locknum) {
769         case TG3_APE_LOCK_GPIO:
770                 if (tg3_asic_rev(tp) == ASIC_REV_5761)
771                         return;
772         case TG3_APE_LOCK_GRC:
773         case TG3_APE_LOCK_MEM:
774                 if (!tp->pci_fn)
775                         bit = APE_LOCK_GRANT_DRIVER;
776                 else
777                         bit = 1 << tp->pci_fn;
778                 break;
779         case TG3_APE_LOCK_PHY0:
780         case TG3_APE_LOCK_PHY1:
781         case TG3_APE_LOCK_PHY2:
782         case TG3_APE_LOCK_PHY3:
783                 bit = APE_LOCK_GRANT_DRIVER;
784                 break;
785         default:
786                 return;
787         }
788
789         if (tg3_asic_rev(tp) == ASIC_REV_5761)
790                 gnt = TG3_APE_LOCK_GRANT;
791         else
792                 gnt = TG3_APE_PER_LOCK_GRANT;
793
794         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 }
796
797 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
798 {
799         u32 apedata;
800
801         while (timeout_us) {
802                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803                         return -EBUSY;
804
805                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
806                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807                         break;
808
809                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810
811                 udelay(10);
812                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813         }
814
815         return timeout_us ? 0 : -EBUSY;
816 }
817
818 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
819 {
820         u32 i, apedata;
821
822         for (i = 0; i < timeout_us / 10; i++) {
823                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
824
825                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
826                         break;
827
828                 udelay(10);
829         }
830
831         return i == timeout_us / 10;
832 }
833
/* Read @len bytes of APE scratchpad data starting at APE offset
 * @base_off into @data, splitting the transfer into chunks no larger
 * than the APE's shared message buffer and handing off each chunk via
 * a driver->APE doorbell event.
 *
 * Returns 0 on success (or trivially when NCSI is absent), -ENODEV if
 * the APE segment signature is wrong, -EAGAIN if the firmware is not
 * ready or a chunk times out, or the error from tg3_ape_event_lock().
 *
 * NOTE(review): the copy loop advances in u32 steps, so @len and the
 * APE-reported buffer length appear expected to be multiples of 4 --
 * confirm with callers.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Shared message buffer layout: two u32 header words (offset,
	 * length) at bufoff, payload at msgoff.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		/* Re-check firmware readiness before each chunk. */
		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock held by tg3_ape_event_lock()
		 * before ringing the doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the completed chunk out of the message area one
		 * u32 at a time.
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
897
/* Post a single driver->APE @event and ring the APE doorbell.
 *
 * Returns 0 on success, -EAGAIN if the APE shared segment is absent
 * or the firmware is not ready, or the error from
 * tg3_ape_event_lock() on timeout.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock (held on success by tg3_ape_event_lock())
	 * before ringing the doorbell.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
924
/* Inform the APE management firmware of a driver state transition.
 * @kind is RESET_KIND_INIT (driver starting) or RESET_KIND_SHUTDOWN
 * (driver unloading / suspending); other kinds are ignored.  No-op
 * unless the APE is enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, bump the
		 * init counter, and advertise driver id and behavior.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			/* Ask the APE to keep the link up for WOL. */
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
978
979 static void tg3_disable_ints(struct tg3 *tp)
980 {
981         int i;
982
983         tw32(TG3PCI_MISC_HOST_CTRL,
984              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
985         for (i = 0; i < tp->irq_max; i++)
986                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
987 }
988
/* Unmask chip interrupts, re-arm every vector's interrupt mailbox
 * with its last processed tag, and kick the coalescing engine (or
 * force an interrupt if status work is already pending).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Clear the sync flag before unmasking so a racing IRQ is not
	 * treated as spurious.
	 */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the mailbox is written twice when
		 * 1-shot MSI is in use -- presumably a hardware
		 * requirement; confirm against chip errata.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1019
1020 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1021 {
1022         struct tg3 *tp = tnapi->tp;
1023         struct tg3_hw_status *sblk = tnapi->hw_status;
1024         unsigned int work_exists = 0;
1025
1026         /* check for phy events */
1027         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1028                 if (sblk->status & SD_STATUS_LINK_CHG)
1029                         work_exists = 1;
1030         }
1031
1032         /* check for TX work to do */
1033         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1034                 work_exists = 1;
1035
1036         /* check for RX work to do */
1037         if (tnapi->rx_rcb_prod_idx &&
1038             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1039                 work_exists = 1;
1040
1041         return work_exists;
1042 }
1043
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack completed work by writing last_tag; mmiowb() orders the
	 * posted mailbox write against later MMIO from other CPUs.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1064
/* Step the core clock back to its normal source via
 * TG3PCI_CLOCK_CTRL.  No-op on CPMU-equipped and 5780-class chips,
 * which handle clocking differently.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN controls and the low divider bits. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Transition in two writes: 44MHZ_CORE+ALTCLK first,
		 * then ALTCLK alone, before the final restore below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1097
1098 #define PHY_BUSY_LOOPS  5000
1099
/* Read PHY register @reg of the PHY at @phy_addr over the MI (MDIO)
 * interface, storing the 16-bit result in *@val.
 *
 * MAC auto-polling is temporarily disabled (and restored on exit) so
 * it cannot contend for the MI interface, and the APE PHY lock is
 * held around the transaction.  Returns 0 on success or -EBUSY if
 * MI_COM_BUSY never clears (~50 ms of polling).
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Busy cleared; re-read to latch final data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1153
/* Read @reg of the device's default PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1158
/* Write @val to PHY register @reg of the PHY at @phy_addr over the MI
 * (MDIO) interface.
 *
 * Returns 0 on success or -EBUSY if MI_COM_BUSY never clears.  Like
 * __tg3_readphy(), temporarily disables MAC auto-polling and holds
 * the APE PHY lock.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Writes to MII_CTRL1000/MII_TG3_AUX_CTRL are skipped on FET
	 * PHYs and reported as success -- presumably these registers
	 * do not exist there; confirm against the PHY datasheet.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1212
/* Write @val to @reg of the device's default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1217
1218 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1219 {
1220         int err;
1221
1222         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1223         if (err)
1224                 goto done;
1225
1226         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1227         if (err)
1228                 goto done;
1229
1230         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1231                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1232         if (err)
1233                 goto done;
1234
1235         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1236
1237 done:
1238         return err;
1239 }
1240
1241 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1242 {
1243         int err;
1244
1245         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1246         if (err)
1247                 goto done;
1248
1249         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1250         if (err)
1251                 goto done;
1252
1253         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1254                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1255         if (err)
1256                 goto done;
1257
1258         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1259
1260 done:
1261         return err;
1262 }
1263
1264 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1265 {
1266         int err;
1267
1268         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1269         if (!err)
1270                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1271
1272         return err;
1273 }
1274
1275 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1276 {
1277         int err;
1278
1279         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1280         if (!err)
1281                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1282
1283         return err;
1284 }
1285
1286 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1287 {
1288         int err;
1289
1290         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1291                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1292                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1293         if (!err)
1294                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1295
1296         return err;
1297 }
1298
1299 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1300 {
1301         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1302                 set |= MII_TG3_AUXCTL_MISC_WREN;
1303
1304         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1305 }
1306
1307 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1308 {
1309         u32 val;
1310         int err;
1311
1312         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1313
1314         if (err)
1315                 return err;
1316
1317         if (enable)
1318                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1319         else
1320                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321
1322         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1323                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1324
1325         return err;
1326 }
1327
/* Write @val to MISC shadow register @reg (MII_TG3_MISC_SHDW) with
 * the write-enable bit set.
 */
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
1333
/* Reset the PHY via BMCR_RESET and poll until the bit self-clears.
 *
 * Returns 0 on success, -EBUSY on an MDIO error or if the reset bit
 * is still set after 5000 polls of ~10 us each.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only when the loop ran to exhaustion; a break
	 * leaves it >= 0.
	 */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1364
/* phylib mii_bus read callback: read @reg of PHY @mii_id under
 * tp->lock.  Returns the register value, or -EIO on failure (stored
 * through the u32 and returned as a negative int, which phylib
 * interprets as an error).
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
1379
/* phylib mii_bus write callback: write @val to @reg of PHY @mii_id
 * under tp->lock.  Returns 0 on success or -EIO on failure.
 */
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
1394
/* Configure the 5785 MAC's PHY interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) according to the attached PHY model and its
 * RGMII in-band signalling flags.  PHYs not in the switch below are
 * left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	/* Pick the LED-mode bits for the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only the LED modes and clock
	 * timeouts; done.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled: unmask all in-band fields. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Program the external RGMII mode bits to match the in-band
	 * RX/TX enables.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Disable MAC auto-polling of the PHY and, if the mdio bus is already
 * registered on a 5785, reapply the 5785 MDIO configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY address for this device and, when phylib is in
 * use, allocate and register the mdio bus and set up the attached
 * PHY's interface mode and quirk flags.
 *
 * Returns 0 on success or a negative errno (allocation, registration,
 * or missing-PHY failure).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts number PHYs by PCI function, starting
		 * at 1; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		/* SSB cores behind a roboswitch get the address from
		 * the switch driver.
		 */
		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do without phylib or if already registered. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-model interface mode and workaround flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1597
1598 static void tg3_mdio_fini(struct tg3 *tp)
1599 {
1600         if (tg3_flag(tp, MDIOBUS_INITED)) {
1601                 tg3_flag_clear(tp, MDIOBUS_INITED);
1602                 mdiobus_unregister(tp->mdio_bus);
1603                 mdiobus_free(tp->mdio_bus);
1604         }
1605 }
1606
/* tp->lock is held.
 *
 * Raise GRC_RX_CPU_DRIVER_EVENT to tell the firmware that a driver
 * event mailbox has been filled in, and record the time for
 * tg3_wait_for_event_ack().
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1618
1619 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1620
/* tp->lock is held.
 *
 * Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC, measured from the last
 * event) for the firmware to clear the previous driver event bit.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Convert to 8 us poll ticks (pairs with udelay(8) below). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware clears DRIVER_EVENT once it has consumed
		 * the previous event.
		 */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Bail out if the device has fallen off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1650
1651 /* tp->lock is held. */
1652 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1653 {
1654         u32 reg, val;
1655
1656         val = 0;
1657         if (!tg3_readphy(tp, MII_BMCR, &reg))
1658                 val = reg << 16;
1659         if (!tg3_readphy(tp, MII_BMSR, &reg))
1660                 val |= (reg & 0xffff);
1661         *data++ = val;
1662
1663         val = 0;
1664         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1665                 val = reg << 16;
1666         if (!tg3_readphy(tp, MII_LPA, &reg))
1667                 val |= (reg & 0xffff);
1668         *data++ = val;
1669
1670         val = 0;
1671         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1672                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1673                         val = reg << 16;
1674                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1675                         val |= (reg & 0xffff);
1676         }
1677         *data++ = val;
1678
1679         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1680                 val = reg << 16;
1681         else
1682                 val = 0;
1683         *data++ = val;
1684 }
1685
1686 /* tp->lock is held. */
1687 static void tg3_ump_link_report(struct tg3 *tp)
1688 {
1689         u32 data[4];
1690
1691         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1692                 return;
1693
1694         tg3_phy_gather_ump_data(tp, data);
1695
1696         tg3_wait_for_event_ack(tp);
1697
1698         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1699         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1700         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1701         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1702         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1703         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1704
1705         tg3_generate_fw_event(tp);
1706 }
1707
1708 /* tp->lock is held. */
1709 static void tg3_stop_fw(struct tg3 *tp)
1710 {
1711         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1712                 /* Wait for RX cpu to ACK the previous event. */
1713                 tg3_wait_for_event_ack(tp);
1714
1715                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1716
1717                 tg3_generate_fw_event(tp);
1718
1719                 /* Wait for RX cpu to ACK this event. */
1720                 tg3_wait_for_event_ack(tp);
1721         }
1722 }
1723
1724 /* tp->lock is held. */
1725 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1726 {
1727         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1728                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1729
1730         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1731                 switch (kind) {
1732                 case RESET_KIND_INIT:
1733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734                                       DRV_STATE_START);
1735                         break;
1736
1737                 case RESET_KIND_SHUTDOWN:
1738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739                                       DRV_STATE_UNLOAD);
1740                         break;
1741
1742                 case RESET_KIND_SUSPEND:
1743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744                                       DRV_STATE_SUSPEND);
1745                         break;
1746
1747                 default:
1748                         break;
1749                 }
1750         }
1751 }
1752
1753 /* tp->lock is held. */
1754 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1755 {
1756         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1757                 switch (kind) {
1758                 case RESET_KIND_INIT:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_START_DONE);
1761                         break;
1762
1763                 case RESET_KIND_SHUTDOWN:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_UNLOAD_DONE);
1766                         break;
1767
1768                 default:
1769                         break;
1770                 }
1771         }
1772 }
1773
1774 /* tp->lock is held. */
1775 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1776 {
1777         if (tg3_flag(tp, ENABLE_ASF)) {
1778                 switch (kind) {
1779                 case RESET_KIND_INIT:
1780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781                                       DRV_STATE_START);
1782                         break;
1783
1784                 case RESET_KIND_SHUTDOWN:
1785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786                                       DRV_STATE_UNLOAD);
1787                         break;
1788
1789                 case RESET_KIND_SUSPEND:
1790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791                                       DRV_STATE_SUSPEND);
1792                         break;
1793
1794                 default:
1795                         break;
1796                 }
1797         }
1798 }
1799
/* Wait for the on-chip firmware to finish its boot-time handshake.
 * Returns 0 on success or when no handshake is expected; -ENODEV when
 * the 5906 VCPU never signals init-done or the PCI channel dies.
 * On the generic path a timeout is NOT an error (some boards ship
 * without firmware) -- it is only logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* Already concluded once that no firmware runs; skip polling. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware writes back the complement of MAGIC1 when done. */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1863
1864 static void tg3_link_report(struct tg3 *tp)
1865 {
1866         if (!netif_carrier_ok(tp->dev)) {
1867                 netif_info(tp, link, tp->dev, "Link is down\n");
1868                 tg3_ump_link_report(tp);
1869         } else if (netif_msg_link(tp)) {
1870                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1871                             (tp->link_config.active_speed == SPEED_1000 ?
1872                              1000 :
1873                              (tp->link_config.active_speed == SPEED_100 ?
1874                               100 : 10)),
1875                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1876                              "full" : "half"));
1877
1878                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1879                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1880                             "on" : "off",
1881                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1882                             "on" : "off");
1883
1884                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1885                         netdev_info(tp->dev, "EEE is %s\n",
1886                                     tp->setlpicnt ? "enabled" : "disabled");
1887
1888                 tg3_ump_link_report(tp);
1889         }
1890
1891         tp->link_up = netif_carrier_ok(tp->dev);
1892 }
1893
1894 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1895 {
1896         u32 flowctrl = 0;
1897
1898         if (adv & ADVERTISE_PAUSE_CAP) {
1899                 flowctrl |= FLOW_CTRL_RX;
1900                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1901                         flowctrl |= FLOW_CTRL_TX;
1902         } else if (adv & ADVERTISE_PAUSE_ASYM)
1903                 flowctrl |= FLOW_CTRL_TX;
1904
1905         return flowctrl;
1906 }
1907
1908 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1909 {
1910         u16 miireg;
1911
1912         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1913                 miireg = ADVERTISE_1000XPAUSE;
1914         else if (flow_ctrl & FLOW_CTRL_TX)
1915                 miireg = ADVERTISE_1000XPSE_ASYM;
1916         else if (flow_ctrl & FLOW_CTRL_RX)
1917                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1918         else
1919                 miireg = 0;
1920
1921         return miireg;
1922 }
1923
1924 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1925 {
1926         u32 flowctrl = 0;
1927
1928         if (adv & ADVERTISE_1000XPAUSE) {
1929                 flowctrl |= FLOW_CTRL_RX;
1930                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1931                         flowctrl |= FLOW_CTRL_TX;
1932         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1933                 flowctrl |= FLOW_CTRL_TX;
1934
1935         return flowctrl;
1936 }
1937
1938 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1939 {
1940         u8 cap = 0;
1941
1942         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1943                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1944         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1945                 if (lcladv & ADVERTISE_1000XPAUSE)
1946                         cap = FLOW_CTRL_RX;
1947                 if (rmtadv & ADVERTISE_1000XPAUSE)
1948                         cap = FLOW_CTRL_TX;
1949         }
1950
1951         return cap;
1952 }
1953
/* Resolve the active flow-control setting -- from the autoneg pause
 * advertisements (lcladv/rmtadv) when pause autoneg is in effect,
 * otherwise from the forced tp->link_config.flowctrl -- and program
 * the MAC RX/TX mode registers to match.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg state lives on the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes and copper use different pause bit layouts. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Touch the hardware only when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1992
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the phy_device state into the MAC mode /
 * timing registers, re-resolves flow control, and emits a link report
 * when anything user-visible changed.  Takes tp->lock itself.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: collect both sides' pause
			 * advertisements for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half gets a different slot-time value. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when something user-visible changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Logging is done after the lock is dropped. */
	if (linkmesg)
		tg3_link_report(tp);
}
2076
/* Connect the net device to its PHY through phylib and trim the PHY's
 * advertised feature set to what this MAC supports.  Returns 0 on
 * success (or if already connected), the phy_connect() error, or
 * -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only parts fall back to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything that survived the mask above. */
	phydev->advertising = phydev->supported;

	return 0;
}
2124
/* Start (or resume) the connected PHY: if we were in the low-power
 * state, push the saved link configuration back into the phy_device,
 * then kick the phylib state machine and autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Restore link settings saved before entering low power. */
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2146
2147 static void tg3_phy_stop(struct tg3 *tp)
2148 {
2149         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2150                 return;
2151
2152         phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2153 }
2154
2155 static void tg3_phy_fini(struct tg3 *tp)
2156 {
2157         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2158                 phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2159                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2160         }
2161 }
2162
2163 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2164 {
2165         int err;
2166         u32 val;
2167
2168         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2169                 return 0;
2170
2171         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2172                 /* Cannot do read-modify-write on 5401 */
2173                 err = tg3_phy_auxctl_write(tp,
2174                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2175                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2176                                            0x4c20);
2177                 goto done;
2178         }
2179
2180         err = tg3_phy_auxctl_read(tp,
2181                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2182         if (err)
2183                 return err;
2184
2185         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2186         err = tg3_phy_auxctl_write(tp,
2187                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2188
2189 done:
2190         return err;
2191 }
2192
2193 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2194 {
2195         u32 phytest;
2196
2197         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2198                 u32 phy;
2199
2200                 tg3_writephy(tp, MII_TG3_FET_TEST,
2201                              phytest | MII_TG3_FET_SHADOW_EN);
2202                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2203                         if (enable)
2204                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2205                         else
2206                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2207                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2208                 }
2209                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2210         }
2211 }
2212
/* Enable or disable PHY auto power-down (APD), dispatching to the
 * FET-specific path for FET PHYs and otherwise programming the MISC
 * shadow SCR5/APD registers.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* 5705+ only; 5717+ parts with an MII serdes PHY are excluded. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled is the one combination without DLLAPD. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	/* 84ms wake timer; the enable bit is only set when requested. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2243
/* Enable or disable automatic MDI crossover.  FET PHYs use the shadow
 * MISCCTRL register; other PHYs use the MISC auxctl shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	/* Not applicable to pre-5705 parts or any serdes PHY. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Shadow registers are visible only while
			 * SHADOW_EN is set in the test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2284
2285 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2286 {
2287         int ret;
2288         u32 val;
2289
2290         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2291                 return;
2292
2293         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2294         if (!ret)
2295                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2296                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2297 }
2298
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value cached in tp->phy_otp.  Each field is extracted with its
 * TG3_OTP_*_MASK/SHIFT pair and written to the matching DSP register.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP access needs the auxctl smdsp mode; bail if we can't
	 * enter it.
	 */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Leave smdsp mode again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2335
/* Read the current EEE state from the PHY (Clause 45 registers via
 * tg3_phy_cl45_read) and the CPMU registers into *eee, or into
 * tp->eee when eee is NULL.  Any failed PHY read aborts early,
 * leaving the remaining fields untouched.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2375
/* Re-evaluate EEE after a link change: on a 100/1000 full-duplex
 * autoneg link, program the LPI exit timer and arm tp->setlpicnt when
 * EEE is active; otherwise clear the TAP26 DSP register and disable
 * LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* The LPI exit timer value depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Refresh tp->eee from the hardware before deciding. */
		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear TAP26 (when the link is up and
		 * smdsp mode can be entered) and turn LPI off.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2415
2416 static void tg3_phy_eee_enable(struct tg3 *tp)
2417 {
2418         u32 val;
2419
2420         if (tp->link_config.active_speed == SPEED_1000 &&
2421             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2422              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2423              tg3_flag(tp, 57765_CLASS)) &&
2424             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2425                 val = MII_TG3_DSP_TAP26_ALNOKO |
2426                       MII_TG3_DSP_TAP26_RMRXSTO;
2427                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2428                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2429         }
2430
2431         val = tr32(TG3_CPMU_EEE_MODE);
2432         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2433 }
2434
2435 static int tg3_wait_macro_done(struct tg3 *tp)
2436 {
2437         int limit = 100;
2438
2439         while (limit--) {
2440                 u32 tmp32;
2441
2442                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2443                         if ((tmp32 & 0x1000) == 0)
2444                                 break;
2445                 }
2446         }
2447         if (limit < 0)
2448                 return -EBUSY;
2449
2450         return 0;
2451 }
2452
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify.  Sets *resetp and returns -EBUSY when a
 * macro operation times out; returns -EBUSY without touching *resetp
 * on a pattern mismatch (after writing the 0x4001/0x4005 recovery
 * sequence).  Returns 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block (0x2000 stride). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel for the read-back phase. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Each entry pair reads back as a low/high word pair;
		 * only 15 bits of low and 4 bits of high are compared.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2518
2519 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2520 {
2521         int chan;
2522
2523         for (chan = 0; chan < 4; chan++) {
2524                 int i;
2525
2526                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2527                              (chan * 0x2000) | 0x0200);
2528                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2529                 for (i = 0; i < 6; i++)
2530                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2531                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2532                 if (tg3_wait_macro_done(tp))
2533                         return -EBUSY;
2534         }
2535
2536         return 0;
2537 }
2538
2539 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2540 {
2541         u32 reg32, phy9_orig;
2542         int retries, do_phy_reset, err;
2543
2544         retries = 10;
2545         do_phy_reset = 1;
2546         do {
2547                 if (do_phy_reset) {
2548                         err = tg3_bmcr_reset(tp);
2549                         if (err)
2550                                 return err;
2551                         do_phy_reset = 0;
2552                 }
2553
2554                 /* Disable transmitter and interrupt.  */
2555                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2556                         continue;
2557
2558                 reg32 |= 0x3000;
2559                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2560
2561                 /* Set full-duplex, 1000 mbps.  */
2562                 tg3_writephy(tp, MII_BMCR,
2563                              BMCR_FULLDPLX | BMCR_SPEED1000);
2564
2565                 /* Set to master mode.  */
2566                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2567                         continue;
2568
2569                 tg3_writephy(tp, MII_CTRL1000,
2570                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2571
2572                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2573                 if (err)
2574                         return err;
2575
2576                 /* Block the PHY control access.  */
2577                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2578
2579                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2580                 if (!err)
2581                         break;
2582         } while (--retries);
2583
2584         err = tg3_phy_reset_chanpat(tp);
2585         if (err)
2586                 return err;
2587
2588         tg3_phydsp_write(tp, 0x8005, 0x0000);
2589
2590         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2591         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2592
2593         tg3_phy_toggle_auxctl_smdsp(tp, false);
2594
2595         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2596
2597         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2598         if (err)
2599                 return err;
2600
2601         reg32 &= ~0x3000;
2602         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2603
2604         return 0;
2605 }
2606
/* Mark the link down: tell the network stack the carrier is gone and
 * clear the driver's own link-state flag.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2612
2613 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2614 {
2615         if (tg3_flag(tp, ENABLE_ASF))
2616                 netdev_warn(tp->dev,
2617                             "Management side-band traffic will be interrupted during phy settings change\n");
2618 }
2619
/* Reset the tigon3 PHY and reapply all chip-specific PHY workarounds
 * (the old comment about a FORCE argument was stale; there is none).
 * Drops the carrier if the link was up, dispatches 5703/4/5 to their
 * dedicated reset routine, and on all other chips performs a BMCR
 * reset followed by errata fix-ups.  Returns 0 or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Take the 5906 EPHY out of IDDQ (low-power) mode first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice; presumably to clear latched status bits
	 * before judging reachability -- confirm against PHY docs.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily drop the 10MB-RX-only CPMU
	 * restriction around the reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: undo the 12.5MHz MAC clock selection the
	 * power-down path may have left behind (see tg3_power_down_phy).
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Errata fix-ups below are keyed off phy_flags; the DSP values
	 * are opaque Broadcom magic.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2763
/* Per-function GPIO power-state handshake messages.  Each of the four
 * PCI functions owns a 4-bit nibble in a shared status word (see
 * tg3_set_function_status): DRVR_PRES means a driver is loaded on that
 * function, NEED_VAUX means that function wants the auxiliary power
 * rail kept up.  The ALL_* masks test the same bit across every nibble.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2779
2780 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2781 {
2782         u32 status, shift;
2783
2784         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2785             tg3_asic_rev(tp) == ASIC_REV_5719)
2786                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2787         else
2788                 status = tr32(TG3_CPMU_DRV_STATUS);
2789
2790         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2791         status &= ~(TG3_GPIO_MSG_MASK << shift);
2792         status |= (newstat << shift);
2793
2794         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795             tg3_asic_rev(tp) == ASIC_REV_5719)
2796                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2797         else
2798                 tw32(TG3_CPMU_DRV_STATUS, status);
2799
2800         return status >> TG3_APE_GPIO_MSG_SHIFT;
2801 }
2802
2803 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2804 {
2805         if (!tg3_flag(tp, IS_NIC))
2806                 return 0;
2807
2808         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2809             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2810             tg3_asic_rev(tp) == ASIC_REV_5720) {
2811                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2812                         return -EIO;
2813
2814                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2815
2816                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2817                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2818
2819                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2820         } else {
2821                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2823         }
2824
2825         return 0;
2826 }
2827
2828 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2829 {
2830         u32 grc_local_ctrl;
2831
2832         if (!tg3_flag(tp, IS_NIC) ||
2833             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2834             tg3_asic_rev(tp) == ASIC_REV_5701)
2835                 return;
2836
2837         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2838
2839         tw32_wait_f(GRC_LOCAL_CTRL,
2840                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2841                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2842
2843         tw32_wait_f(GRC_LOCAL_CTRL,
2844                     grc_local_ctrl,
2845                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2846
2847         tw32_wait_f(GRC_LOCAL_CTRL,
2848                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2849                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 }
2851
/* Switch the NIC's power source to the auxiliary rail by driving the
 * voltage-switch GPIOs.  The exact recipe is chip-dependent and the
 * staged writes are order-sensitive, each settling for
 * TG3_GRC_LCLCTL_PWRSW_DELAY.  No-op for non-NIC configurations.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Single-shot write on the oldest chips. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* GPIO0 is asserted in a second step, then GPIO2 is
		 * released last (when usable).
		 */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2928
2929 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2930 {
2931         u32 msg = 0;
2932
2933         /* Serialize power state transitions */
2934         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2935                 return;
2936
2937         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2938                 msg = TG3_GPIO_MSG_NEED_VAUX;
2939
2940         msg = tg3_set_function_status(tp, msg);
2941
2942         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2943                 goto done;
2944
2945         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2946                 tg3_pwrsrc_switch_to_vaux(tp);
2947         else
2948                 tg3_pwrsrc_die_with_vmain(tp);
2949
2950 done:
2951         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2952 }
2953
/* Decide whether this device -- or its peer PCI function on dual-port
 * boards -- still needs the auxiliary power rail, then switch to Vaux
 * or settle on Vmain.  @include_wol: count Wake-on-LAN as a reason to
 * keep Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		/* These chips arbitrate through the shared status word. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* NOTE(review): bail if the peer driver is fully
			 * up -- presumably it then manages the shared
			 * power GPIOs itself; confirm against callers.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2997
2998 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2999 {
3000         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3001                 return 1;
3002         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3003                 if (speed != SPEED_10)
3004                         return 1;
3005         } else if (speed == SPEED_10)
3006                 return 1;
3007
3008         return 0;
3009 }
3010
3011 static bool tg3_phy_power_bug(struct tg3 *tp)
3012 {
3013         switch (tg3_asic_rev(tp)) {
3014         case ASIC_REV_5700:
3015         case ASIC_REV_5704:
3016                 return true;
3017         case ASIC_REV_5780:
3018                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3019                         return true;
3020                 return false;
3021         case ASIC_REV_5717:
3022                 if (!tp->pci_fn)
3023                         return true;
3024                 return false;
3025         case ASIC_REV_5719:
3026         case ASIC_REV_5720:
3027                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3028                     !tp->pci_fn)
3029                         return true;
3030                 return false;
3031         }
3032
3033         return false;
3034 }
3035
3036 static bool tg3_phy_led_bug(struct tg3 *tp)
3037 {
3038         switch (tg3_asic_rev(tp)) {
3039         case ASIC_REV_5719:
3040         case ASIC_REV_5720:
3041                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3042                     !tp->pci_fn)
3043                         return true;
3044                 return false;
3045         }
3046
3047         return false;
3048 }
3049
/* Put the PHY into its lowest safe power state.  @do_low_power selects
 * the extra LED-off / aux-control programming on plain copper PHYs.
 * Depending on the chip this resets the 5704 SerDes, puts the 5906
 * EPHY into IDDQ, programs FET-PHY shadow registers, or finally sets
 * BMCR_PDOWN; chips with the power-down erratum (tg3_phy_power_bug)
 * are left powered.  No-op if the link must stay up for management.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SerDes in reset; bit 15 of SERDES_CFG
			 * is presumably its power control -- confirm.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 EPHY: BMCR reset, then IDDQ low-power mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Stop advertising, then set the standby-power
			 * bit through the FET shadow register window.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	/* 5784-AX / 5761-AX: drop the MAC clock to 12.5MHz first
	 * (undone again in tg3_phy_reset).
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3123
3124 /* tp->lock is held. */
3125 static int tg3_nvram_lock(struct tg3 *tp)
3126 {
3127         if (tg3_flag(tp, NVRAM)) {
3128                 int i;
3129
3130                 if (tp->nvram_lock_cnt == 0) {
3131                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3132                         for (i = 0; i < 8000; i++) {
3133                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3134                                         break;
3135                                 udelay(20);
3136                         }
3137                         if (i == 8000) {
3138                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3139                                 return -ENODEV;
3140                         }
3141                 }
3142                 tp->nvram_lock_cnt++;
3143         }
3144         return 0;
3145 }
3146
3147 /* tp->lock is held. */
3148 static void tg3_nvram_unlock(struct tg3 *tp)
3149 {
3150         if (tg3_flag(tp, NVRAM)) {
3151                 if (tp->nvram_lock_cnt > 0)
3152                         tp->nvram_lock_cnt--;
3153                 if (tp->nvram_lock_cnt == 0)
3154                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3155         }
3156 }
3157
3158 /* tp->lock is held. */
3159 static void tg3_enable_nvram_access(struct tg3 *tp)
3160 {
3161         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3162                 u32 nvaccess = tr32(NVRAM_ACCESS);
3163
3164                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3165         }
3166 }
3167
3168 /* tp->lock is held. */
3169 static void tg3_disable_nvram_access(struct tg3 *tp)
3170 {
3171         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172                 u32 nvaccess = tr32(NVRAM_ACCESS);
3173
3174                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3175         }
3176 }
3177
/* Read one 32-bit word from the legacy SEEPROM through the GRC EEPROM
 * mailbox registers (fallback path when there is no NVRAM interface).
 * @offset must be dword-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Polls up to ~1s for completion.  Returns 0, -EINVAL, or -EBUSY.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the unrelated bits of the address register while
	 * programming address, device id 0 and the READ+START command.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* 1000 * 1ms poll for the COMPLETE bit. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3217
/* Max polls of NVRAM_CMD_DONE (10-40us apart, see tg3_nvram_exec_cmd). */
#define NVRAM_CMD_TIMEOUT 5000
3219
3220 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3221 {
3222         int i;
3223
3224         tw32(NVRAM_CMD, nvram_cmd);
3225         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3226                 usleep_range(10, 40);
3227                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3228                         udelay(10);
3229                         break;
3230                 }
3231         }
3232
3233         if (i == NVRAM_CMD_TIMEOUT)
3234                 return -EBUSY;
3235
3236         return 0;
3237 }
3238
3239 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3240 {
3241         if (tg3_flag(tp, NVRAM) &&
3242             tg3_flag(tp, NVRAM_BUFFERED) &&
3243             tg3_flag(tp, FLASH) &&
3244             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3245             (tp->nvram_jedecnum == JEDEC_ATMEL))
3246
3247                 addr = ((addr / tp->nvram_pagesize) <<
3248                         ATMEL_AT45DB0X1B_PAGE_POS) +
3249                        (addr % tp->nvram_pagesize);
3250
3251         return addr;
3252 }
3253
3254 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3255 {
3256         if (tg3_flag(tp, NVRAM) &&
3257             tg3_flag(tp, NVRAM_BUFFERED) &&
3258             tg3_flag(tp, FLASH) &&
3259             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3260             (tp->nvram_jedecnum == JEDEC_ATMEL))
3261
3262                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3263                         tp->nvram_pagesize) +
3264                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3265
3266         return addr;
3267 }
3268
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM at linear @offset.  Falls back to
 * the legacy SEEPROM path when there is no NVRAM interface; otherwise
 * takes the arbitration lock, enables access, issues the read command
 * and tears everything down again.  Returns 0 or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Linear offset -> flash-specific physical address. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3306
3307 /* Ensures NVRAM data is in bytestream format. */
3308 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3309 {
3310         u32 v;
3311         int res = tg3_nvram_read(tp, offset, &v);
3312         if (!res)
3313                 *val = cpu_to_be32(v);
3314         return res;
3315 }
3316
/* Write @len bytes from @buf to the legacy SEEPROM, one 32-bit word at
 * a time, polling each word for completion (up to ~1s per word).
 * @len is consumed in 4-byte steps.  Returns 0 on success or -EBUSY if
 * a word write times out (earlier words remain written).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any previous COMPLETE before starting this write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* 1000 * 1ms poll for this word's COMPLETE bit. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3365
3366 /* offset and length are dword aligned */
3367 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3368                 u8 *buf)
3369 {
3370         int ret = 0;
3371         u32 pagesize = tp->nvram_pagesize;
3372         u32 pagemask = pagesize - 1;
3373         u32 nvram_cmd;
3374         u8 *tmp;
3375
3376         tmp = kmalloc(pagesize, GFP_KERNEL);
3377         if (tmp == NULL)
3378                 return -ENOMEM;
3379
3380         while (len) {
3381                 int j;
3382                 u32 phy_addr, page_off, size;
3383
3384                 phy_addr = offset & ~pagemask;
3385
3386                 for (j = 0; j < pagesize; j += 4) {
3387                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3388                                                   (__be32 *) (tmp + j));
3389                         if (ret)
3390                                 break;
3391                 }
3392                 if (ret)
3393                         break;
3394
3395                 page_off = offset & pagemask;
3396                 size = pagesize;
3397                 if (len < size)
3398                         size = len;
3399
3400                 len -= size;
3401
3402                 memcpy(tmp + page_off, buf, size);
3403
3404                 offset = offset + (pagesize - page_off);
3405
3406                 tg3_enable_nvram_access(tp);
3407
3408                 /*
3409                  * Before we can erase the flash page, we need
3410                  * to issue a special "write enable" command.
3411                  */
3412                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3413
3414                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3415                         break;
3416
3417                 /* Erase the target page */
3418                 tw32(NVRAM_ADDR, phy_addr);
3419
3420                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3421                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3422
3423                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424                         break;
3425
3426                 /* Issue another write enable to start the write. */
3427                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3428
3429                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430                         break;
3431
3432                 for (j = 0; j < pagesize; j += 4) {
3433                         __be32 data;
3434
3435                         data = *((__be32 *) (tmp + j));
3436
3437                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3438
3439                         tw32(NVRAM_ADDR, phy_addr + j);
3440
3441                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3442                                 NVRAM_CMD_WR;
3443
3444                         if (j == 0)
3445                                 nvram_cmd |= NVRAM_CMD_FIRST;
3446                         else if (j == (pagesize - 4))
3447                                 nvram_cmd |= NVRAM_CMD_LAST;
3448
3449                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3450                         if (ret)
3451                                 break;
3452                 }
3453                 if (ret)
3454                         break;
3455         }
3456
3457         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3458         tg3_nvram_exec_cmd(tp, nvram_cmd);
3459
3460         kfree(tmp);
3461
3462         return ret;
3463 }
3464
/* offset and length are dword aligned.
 * Write @len bytes from @buf to buffered flash / EEPROM one 32-bit
 * word at a time, framing each page burst with FIRST/LAST markers.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	/* One 32-bit word of bytestream data per iteration. */
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Byte offset of this word within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of a page (or of the whole
		 * transfer) and the last word of a page or transfer so
		 * the controller frames the burst correctly.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Load the address register at the start of each burst;
		 * parts without FLASH or without 57765_PLUS reload it
		 * for every word.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST-JEDEC parts (other than 5752 / 5755+) need an
		 * explicit write-enable command before each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3519
3520 /* offset and length are dword aligned */
3521 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3522 {
3523         int ret;
3524
3525         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3526                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3527                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3528                 udelay(40);
3529         }
3530
3531         if (!tg3_flag(tp, NVRAM)) {
3532                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3533         } else {
3534                 u32 grc_mode;
3535
3536                 ret = tg3_nvram_lock(tp);
3537                 if (ret)
3538                         return ret;
3539
3540                 tg3_enable_nvram_access(tp);
3541                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3542                         tw32(NVRAM_WRITE1, 0x406);
3543
3544                 grc_mode = tr32(GRC_MODE);
3545                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3546
3547                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3548                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3549                                 buf);
3550                 } else {
3551                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3552                                 buf);
3553                 }
3554
3555                 grc_mode = tr32(GRC_MODE);
3556                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3557
3558                 tg3_disable_nvram_access(tp);
3559                 tg3_nvram_unlock(tp);
3560         }
3561
3562         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3563                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3564                 udelay(40);
3565         }
3566
3567         return ret;
3568 }
3569
3570 #define RX_CPU_SCRATCH_BASE     0x30000
3571 #define RX_CPU_SCRATCH_SIZE     0x04000
3572 #define TX_CPU_SCRATCH_BASE     0x34000
3573 #define TX_CPU_SCRATCH_SIZE     0x04000
3574
3575 /* tp->lock is held. */
3576 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3577 {
3578         int i;
3579         const int iters = 10000;
3580
3581         for (i = 0; i < iters; i++) {
3582                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3583                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3584                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3585                         break;
3586                 if (pci_channel_offline(tp->pdev))
3587                         return -EBUSY;
3588         }
3589
3590         return (i == iters) ? -EBUSY : 0;
3591 }
3592
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	/* Poll for the RX CPU to halt... */
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	/* ...then issue one more unconditional halt request and let it
	 * settle.  NOTE(review): performed even when the poll timed
	 * out, so the halt is forced regardless of rc -- the original
	 * return code is still reported to the caller.
	 */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3604
/* tp->lock is held.
 * Halt the TX CPU.  Unlike tg3_rxcpu_pause(), no extra forced halt
 * write is issued after the poll.
 */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3610
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear all pending CPU state bits, then release the halt by
	 * writing zero to the mode register (flushed write).
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3617
/* tp->lock is held.
 * Convenience wrapper: restart the RX CPU.
 */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3623
/* tp->lock is held.
 * Halt the RX or TX CPU at @cpu_base.  Returns 0 on success, -ENODEV
 * if the CPU failed to halt.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ parts must never be asked to halt a TX CPU (see the
	 * matching check in tg3_load_firmware_cpu).
	 */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 is halted through the GRC VCPU control register
		 * instead of the per-CPU mode register.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3661
3662 static int tg3_fw_data_len(struct tg3 *tp,
3663                            const struct tg3_firmware_hdr *fw_hdr)
3664 {
3665         int fw_len;
3666
3667         /* Non fragmented firmware have one firmware header followed by a
3668          * contiguous chunk of data to be written. The length field in that
3669          * header is not the length of data to be written but the complete
3670          * length of the bss. The data length is determined based on
3671          * tp->fw->size minus headers.
3672          *
3673          * Fragmented firmware have a main header followed by multiple
3674          * fragments. Each fragment is identical to non fragmented firmware
3675          * with a firmware header followed by a contiguous chunk of data. In
3676          * the main header, the length field is unused and set to 0xffffffff.
3677          * In each fragment header the length is the entire size of that
3678          * fragment i.e. fragment data + header length. Data length is
3679          * therefore length field in the header minus TG3_FW_HDR_LEN.
3680          */
3681         if (tp->fw_len == 0xffffffff)
3682                 fw_len = be32_to_cpu(fw_hdr->len);
3683         else
3684                 fw_len = tp->fw->size;
3685
3686         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3687 }
3688
/* tp->lock is held.
 * Download a (possibly fragmented) firmware image into @cpu_base's
 * scratch memory at @cpu_scratch_base.  Returns 0 on success or a
 * negative errno if the target is invalid or the CPU won't halt.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* 5705+ parts have no TX CPU to load. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the scratch-memory write primitive for this chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then keep the CPU halted while
		 * the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to the scratch address given by
	 * its header (layout described above tg3_fw_data_len()).
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3754
3755 /* tp->lock is held. */
3756 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3757 {
3758         int i;
3759         const int iters = 5;
3760
3761         tw32(cpu_base + CPU_STATE, 0xffffffff);
3762         tw32_f(cpu_base + CPU_PC, pc);
3763
3764         for (i = 0; i < iters; i++) {
3765                 if (tr32(cpu_base + CPU_PC) == pc)
3766                         break;
3767                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3768                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3769                 tw32_f(cpu_base + CPU_PC, pc);
3770                 udelay(1000);
3771         }
3772
3773         return (i == iters) ? -EBUSY : 0;
3774 }
3775
/* tp->lock is held.
 * Load the 5701 A0 workaround firmware into both on-chip CPUs, then
 * start only the RX CPU at the firmware's entry point.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the RX CPU so it begins executing the new image. */
	tg3_rxcpu_resume(tp);

	return 0;
}
3817
3818 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3819 {
3820         const int iters = 1000;
3821         int i;
3822         u32 val;
3823
3824         /* Wait for boot code to complete initialization and enter service
3825          * loop. It is then safe to download service patches
3826          */
3827         for (i = 0; i < iters; i++) {
3828                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3829                         break;
3830
3831                 udelay(10);
3832         }
3833
3834         if (i == iters) {
3835                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3836                 return -EBUSY;
3837         }
3838
3839         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3840         if (val & 0xff) {
3841                 netdev_warn(tp->dev,
3842                             "Other patches exist. Not downloading EEE patch\n");
3843                 return -EEXIST;
3844         }
3845
3846         return 0;
3847 }
3848
/* tp->lock is held.
 * Download the 57766 service-patch firmware into the RX CPU.  All
 * failures are silent (void return); the device keeps running on its
 * boot code.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only proceed on devices with the NO_NVRAM flag set.
	 * NOTE(review): rationale for restricting to NO_NVRAM parts is
	 * not visible here -- confirm against chip errata.
	 */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	/* Boot code must be idle and no other patch installed. */
	if (tg3_validate_rxcpu_state(tp))
		return;

	/* No firmware blob loaded -- nothing to download. */
	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3889
/* tp->lock is held.
 * Download the TSO offload firmware and start the target CPU at the
 * firmware's entry point.  No-op (returns 0) on chips that do TSO in
 * hardware (no FW_TSO flag).
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 runs TSO firmware on the RX CPU out of the MBUF pool;
	 * all others use the TX CPU scratch area.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the CPU so it begins executing the new image. */
	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3939
3940 /* tp->lock is held. */
3941 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3942 {
3943         u32 addr_high, addr_low;
3944
3945         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3946         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3947                     (mac_addr[4] <<  8) | mac_addr[5]);
3948
3949         if (index < 4) {
3950                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3951                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3952         } else {
3953                 index -= 4;
3954                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3955                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3956         }
3957 }
3958
3959 /* tp->lock is held. */
3960 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3961 {
3962         u32 addr_high;
3963         int i;
3964
3965         for (i = 0; i < 4; i++) {
3966                 if (i == 1 && skip_mac_1)
3967                         continue;
3968                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3969         }
3970
3971         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3972             tg3_asic_rev(tp) == ASIC_REV_5704) {
3973                 for (i = 4; i < 16; i++)
3974                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975         }
3976
3977         addr_high = (tp->dev->dev_addr[0] +
3978                      tp->dev->dev_addr[1] +
3979                      tp->dev->dev_addr[2] +
3980                      tp->dev->dev_addr[3] +
3981                      tp->dev->dev_addr[4] +
3982                      tp->dev->dev_addr[5]) &
3983                 TX_BACKOFF_SEED_MASK;
3984         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3985 }
3986
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	/* Rewrite the driver's cached MISC_HOST_CTRL value into PCI
	 * config space (it may have been lost across a power
	 * transition -- see callers).
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3996
3997 static int tg3_power_up(struct tg3 *tp)
3998 {
3999         int err;
4000
4001         tg3_enable_register_access(tp);
4002
4003         err = pci_set_power_state(tp->pdev, PCI_D0);
4004         if (!err) {
4005                 /* Switch out of Vaux if it is a NIC */
4006                 tg3_pwrsrc_switch_to_vmain(tp);
4007         } else {
4008                 netdev_err(tp->dev, "Transition to D0 failed\n");
4009         }
4010
4011         return err;
4012 }
4013
4014 static int tg3_setup_phy(struct tg3 *, bool);
4015
4016 static int tg3_power_down_prepare(struct tg3 *tp)
4017 {
4018         u32 misc_host_ctrl;
4019         bool device_should_wake, do_low_power;
4020
4021         tg3_enable_register_access(tp);
4022
4023         /* Restore the CLKREQ setting. */
4024         if (tg3_flag(tp, CLKREQ_BUG))
4025                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4026                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4027
4028         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4029         tw32(TG3PCI_MISC_HOST_CTRL,
4030              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4031
4032         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4033                              tg3_flag(tp, WOL_ENABLE);
4034
4035         if (tg3_flag(tp, USE_PHYLIB)) {
4036                 do_low_power = false;
4037                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4038                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4039                         struct phy_device *phydev;
4040                         u32 phyid, advertising;
4041
4042                         phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4043
4044                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4045
4046                         tp->link_config.speed = phydev->speed;
4047                         tp->link_config.duplex = phydev->duplex;
4048                         tp->link_config.autoneg = phydev->autoneg;
4049                         tp->link_config.advertising = phydev->advertising;
4050
4051                         advertising = ADVERTISED_TP |
4052                                       ADVERTISED_Pause |
4053                                       ADVERTISED_Autoneg |
4054                                       ADVERTISED_10baseT_Half;
4055
4056                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4057                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4058                                         advertising |=
4059                                                 ADVERTISED_100baseT_Half |
4060                                                 ADVERTISED_100baseT_Full |
4061                                                 ADVERTISED_10baseT_Full;
4062                                 else
4063                                         advertising |= ADVERTISED_10baseT_Full;
4064                         }
4065
4066                         phydev->advertising = advertising;
4067
4068                         phy_start_aneg(phydev);
4069
4070                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4071                         if (phyid != PHY_ID_BCMAC131) {
4072                                 phyid &= PHY_BCM_OUI_MASK;
4073                                 if (phyid == PHY_BCM_OUI_1 ||
4074                                     phyid == PHY_BCM_OUI_2 ||
4075                                     phyid == PHY_BCM_OUI_3)
4076                                         do_low_power = true;
4077                         }
4078                 }
4079         } else {
4080                 do_low_power = true;
4081
4082                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4083                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4084
4085                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4086                         tg3_setup_phy(tp, false);
4087         }
4088
4089         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4090                 u32 val;
4091
4092                 val = tr32(GRC_VCPU_EXT_CTRL);
4093                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4094         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4095                 int i;
4096                 u32 val;
4097
4098                 for (i = 0; i < 200; i++) {
4099                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4100                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4101                                 break;
4102                         msleep(1);
4103                 }
4104         }
4105         if (tg3_flag(tp, WOL_CAP))
4106                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4107                                                      WOL_DRV_STATE_SHUTDOWN |
4108                                                      WOL_DRV_WOL |
4109                                                      WOL_SET_MAGIC_PKT);
4110
4111         if (device_should_wake) {
4112                 u32 mac_mode;
4113
4114                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4115                         if (do_low_power &&
4116                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4117                                 tg3_phy_auxctl_write(tp,
4118                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4119                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4120                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4121                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4122                                 udelay(40);
4123                         }
4124
4125                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4126                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4127                         else if (tp->phy_flags &
4128                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4129                                 if (tp->link_config.active_speed == SPEED_1000)
4130                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4131                                 else
4132                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4133                         } else
4134                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4135
4136                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4137                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4138                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4139                                              SPEED_100 : SPEED_10;
4140                                 if (tg3_5700_link_polarity(tp, speed))
4141                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4142                                 else
4143                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4144                         }
4145                 } else {
4146                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4147                 }
4148
4149                 if (!tg3_flag(tp, 5750_PLUS))
4150                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4151
4152                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4153                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4154                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4155                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4156
4157                 if (tg3_flag(tp, ENABLE_APE))
4158                         mac_mode |= MAC_MODE_APE_TX_EN |
4159                                     MAC_MODE_APE_RX_EN |
4160                                     MAC_MODE_TDE_ENABLE;
4161
4162                 tw32_f(MAC_MODE, mac_mode);
4163                 udelay(100);
4164
4165                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4166                 udelay(10);
4167         }
4168
4169         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4170             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4172                 u32 base_val;
4173
4174                 base_val = tp->pci_clock_ctrl;
4175                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4176                              CLOCK_CTRL_TXCLK_DISABLE);
4177
4178                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4179                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4180         } else if (tg3_flag(tp, 5780_CLASS) ||
4181                    tg3_flag(tp, CPMU_PRESENT) ||
4182                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4183                 /* do nothing */
4184         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4185                 u32 newbits1, newbits2;
4186
4187                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4188                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4189                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4190                                     CLOCK_CTRL_TXCLK_DISABLE |
4191                                     CLOCK_CTRL_ALTCLK);
4192                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4193                 } else if (tg3_flag(tp, 5705_PLUS)) {
4194                         newbits1 = CLOCK_CTRL_625_CORE;
4195                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4196                 } else {
4197                         newbits1 = CLOCK_CTRL_ALTCLK;
4198                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4199                 }
4200
4201                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4202                             40);
4203
4204                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4205                             40);
4206
4207                 if (!tg3_flag(tp, 5705_PLUS)) {
4208                         u32 newbits3;
4209
4210                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4211                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4212                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4213                                             CLOCK_CTRL_TXCLK_DISABLE |
4214                                             CLOCK_CTRL_44MHZ_CORE);
4215                         } else {
4216                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4217                         }
4218
4219                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4220                                     tp->pci_clock_ctrl | newbits3, 40);
4221                 }
4222         }
4223
4224         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4225                 tg3_power_down_phy(tp, do_low_power);
4226
4227         tg3_frob_aux_power(tp, true);
4228
4229         /* Workaround for unstable PLL clock */
4230         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4231             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4232              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4233                 u32 val = tr32(0x7d00);
4234
4235                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4236                 tw32(0x7d00, val);
4237                 if (!tg3_flag(tp, ENABLE_ASF)) {
4238                         int err;
4239
4240                         err = tg3_nvram_lock(tp);
4241                         tg3_halt_cpu(tp, RX_CPU_BASE);
4242                         if (!err)
4243                                 tg3_nvram_unlock(tp);
4244                 }
4245         }
4246
4247         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4248
4249         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4250
4251         return 0;
4252 }
4253
/* Final step of powering the device down: arm or disarm PCI
 * wake-on-LAN according to the WOL_ENABLE flag, then place the
 * device into the D3hot low-power state.
 */
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4259
4260 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4261 {
4262         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4263         case MII_TG3_AUX_STAT_10HALF:
4264                 *speed = SPEED_10;
4265                 *duplex = DUPLEX_HALF;
4266                 break;
4267
4268         case MII_TG3_AUX_STAT_10FULL:
4269                 *speed = SPEED_10;
4270                 *duplex = DUPLEX_FULL;
4271                 break;
4272
4273         case MII_TG3_AUX_STAT_100HALF:
4274                 *speed = SPEED_100;
4275                 *duplex = DUPLEX_HALF;
4276                 break;
4277
4278         case MII_TG3_AUX_STAT_100FULL:
4279                 *speed = SPEED_100;
4280                 *duplex = DUPLEX_FULL;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_1000HALF:
4284                 *speed = SPEED_1000;
4285                 *duplex = DUPLEX_HALF;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_1000FULL:
4289                 *speed = SPEED_1000;
4290                 *duplex = DUPLEX_FULL;
4291                 break;
4292
4293         default:
4294                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4295                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4296                                  SPEED_10;
4297                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4298                                   DUPLEX_HALF;
4299                         break;
4300                 }
4301                 *speed = SPEED_UNKNOWN;
4302                 *duplex = DUPLEX_UNKNOWN;
4303                 break;
4304         }
4305 }
4306
4307 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4308 {
4309         int err = 0;
4310         u32 val, new_adv;
4311
4312         new_adv = ADVERTISE_CSMA;
4313         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4314         new_adv |= mii_advertise_flowctrl(flowctrl);
4315
4316         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4317         if (err)
4318                 goto done;
4319
4320         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4321                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4322
4323                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4324                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4325                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4326
4327                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4328                 if (err)
4329                         goto done;
4330         }
4331
4332         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4333                 goto done;
4334
4335         tw32(TG3_CPMU_EEE_MODE,
4336              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4337
4338         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4339         if (!err) {
4340                 u32 err2;
4341
4342                 val = 0;
4343                 /* Advertise 100-BaseTX EEE ability */
4344                 if (advertise & ADVERTISED_100baseT_Full)
4345                         val |= MDIO_AN_EEE_ADV_100TX;
4346                 /* Advertise 1000-BaseT EEE ability */
4347                 if (advertise & ADVERTISED_1000baseT_Full)
4348                         val |= MDIO_AN_EEE_ADV_1000T;
4349
4350                 if (!tp->eee.eee_enabled) {
4351                         val = 0;
4352                         tp->eee.advertised = 0;
4353                 } else {
4354                         tp->eee.advertised = advertise &
4355                                              (ADVERTISED_100baseT_Full |
4356                                               ADVERTISED_1000baseT_Full);
4357                 }
4358
4359                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4360                 if (err)
4361                         val = 0;
4362
4363                 switch (tg3_asic_rev(tp)) {
4364                 case ASIC_REV_5717:
4365                 case ASIC_REV_57765:
4366                 case ASIC_REV_57766:
4367                 case ASIC_REV_5719:
4368                         /* If we advertised any eee advertisements above... */
4369                         if (val)
4370                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4371                                       MII_TG3_DSP_TAP26_RMRXSTO |
4372                                       MII_TG3_DSP_TAP26_OPCSINPT;
4373                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4374                         /* Fall through */
4375                 case ASIC_REV_5720:
4376                 case ASIC_REV_5762:
4377                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4378                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4379                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4380                 }
4381
4382                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4383                 if (!err)
4384                         err = err2;
4385         }
4386
4387 done:
4388         return err;
4389 }
4390
/* Begin bringing up the copper PHY link.  With autoneg enabled (or
 * when the PHY is in low-power mode) the advertisement registers are
 * programmed and autonegotiation is (re)started; otherwise the
 * configured fixed speed/duplex is forced via BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down: advertise only the speeds the
			 * WOL configuration permits.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		/* Forced mode: active_* mirrors the requested config. */
		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and poll (up to 15 ms)
			 * for the old link to drop before applying the
			 * new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice for the
				 * current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4487
/* Reconstruct tp->link_config (autoneg, speed, duplex, advertising,
 * flow control) from the PHY's current register contents, so the
 * driver can adopt a link configuration already set up in hardware.
 *
 * Returns 0 on success, or a negative error code on PHY access
 * failure or an unusable forced-mode configuration.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced speed/duplex mode. */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* Forced 10 Mbps is not usable on serdes PHYs. */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autonegotiation is enabled; pull the advertised modes. */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		/* Copper: 10/100 abilities and pause bits live in
		 * MII_ADVERTISE.
		 */
		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit abilities are in CTRL1000. */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: the 1000X advertisement register
			 * carries both pause and speed/duplex bits.
			 */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4584
4585 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4586 {
4587         int err;
4588
4589         /* Turn off tap power management. */
4590         /* Set Extended packet length bit */
4591         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4592
4593         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4594         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4595         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4596         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4597         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4598
4599         udelay(40);
4600
4601         return err;
4602 }
4603
4604 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4605 {
4606         struct ethtool_eee eee;
4607
4608         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4609                 return true;
4610
4611         tg3_eee_pull_config(tp, &eee);
4612
4613         if (tp->eee.eee_enabled) {
4614                 if (tp->eee.advertised != eee.advertised ||
4615                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4616                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4617                         return false;
4618         } else {
4619                 /* EEE is disabled but we're advertising */
4620                 if (eee.advertised)
4621                         return false;
4622         }
4623
4624         return true;
4625 }
4626
/* Verify that the PHY's advertisement registers still match the
 * requested link configuration.  On return *lcladv holds the raw
 * MII_ADVERTISE value for later flow-control resolution.
 *
 * Returns true when the hardware advertisement agrees with
 * tp->link_config, false on a mismatch or PHY read failure.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits are only compared on full-duplex links. */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 {A0,B0} was configured with forced master
			 * mode, so include the master bits in the compare.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4670
4671 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4672 {
4673         u32 lpeth = 0;
4674
4675         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4676                 u32 val;
4677
4678                 if (tg3_readphy(tp, MII_STAT1000, &val))
4679                         return false;
4680
4681                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4682         }
4683
4684         if (tg3_readphy(tp, MII_LPA, rmtadv))
4685                 return false;
4686
4687         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4688         tp->link_config.rmt_adv = lpeth;
4689
4690         return true;
4691 }
4692
4693 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4694 {
4695         if (curr_link_up != tp->link_up) {
4696                 if (curr_link_up) {
4697                         netif_carrier_on(tp->dev);
4698                 } else {
4699                         netif_carrier_off(tp->dev);
4700                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4701                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4702                 }
4703
4704                 tg3_link_report(tp);
4705                 return true;
4706         }
4707
4708         return false;
4709 }
4710
/* Mask MAC event interrupts and acknowledge any latched link-related
 * MAC status bits.  The flushed write plus 40 us delay lets the
 * clear take effect before the caller reprograms the link.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4722
/* Program the CPMU Energy Efficient Ethernet registers from the
 * current tp->eee settings (enable state, tx LPI option and timer).
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	/* Conditions treated as link idle (see LNKIDL bit names). */
	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* The mode register is zeroed entirely when EEE is disabled. */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	/* Debounce timer 1: lower 16 bits carry the tx LPI timer. */
	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4758
4759 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4760 {
4761         bool current_link_up;
4762         u32 bmsr, val;
4763         u32 lcl_adv, rmt_adv;
4764         u16 current_speed;
4765         u8 current_duplex;
4766         int i, err;
4767
4768         tg3_clear_mac_status(tp);
4769
4770         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4771                 tw32_f(MAC_MI_MODE,
4772                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4773                 udelay(80);
4774         }
4775
4776         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4777
4778         /* Some third-party PHYs need to be reset on link going
4779          * down.
4780          */
4781         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4782              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4783              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4784             tp->link_up) {
4785                 tg3_readphy(tp, MII_BMSR, &bmsr);
4786                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4787                     !(bmsr & BMSR_LSTATUS))
4788                         force_reset = true;
4789         }
4790         if (force_reset)
4791                 tg3_phy_reset(tp);
4792
4793         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4794                 tg3_readphy(tp, MII_BMSR, &bmsr);
4795                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4796                     !tg3_flag(tp, INIT_COMPLETE))
4797                         bmsr = 0;
4798
4799                 if (!(bmsr & BMSR_LSTATUS)) {
4800                         err = tg3_init_5401phy_dsp(tp);
4801                         if (err)
4802                                 return err;
4803
4804                         tg3_readphy(tp, MII_BMSR, &bmsr);
4805                         for (i = 0; i < 1000; i++) {
4806                                 udelay(10);
4807                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4808                                     (bmsr & BMSR_LSTATUS)) {
4809                                         udelay(40);
4810                                         break;
4811                                 }
4812                         }
4813
4814                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4815                             TG3_PHY_REV_BCM5401_B0 &&
4816                             !(bmsr & BMSR_LSTATUS) &&
4817                             tp->link_config.active_speed == SPEED_1000) {
4818                                 err = tg3_phy_reset(tp);
4819                                 if (!err)
4820                                         err = tg3_init_5401phy_dsp(tp);
4821                                 if (err)
4822                                         return err;
4823                         }
4824                 }
4825         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4826                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4827                 /* 5701 {A0,B0} CRC bug workaround */
4828                 tg3_writephy(tp, 0x15, 0x0a75);
4829                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4830                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4831                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832         }
4833
4834         /* Clear pending interrupts... */
4835         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4836         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4837
4838         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4839                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4840         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4841                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4842
4843         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4844             tg3_asic_rev(tp) == ASIC_REV_5701) {
4845                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4846                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4847                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4848                 else
4849                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4850         }
4851
4852         current_link_up = false;
4853         current_speed = SPEED_UNKNOWN;
4854         current_duplex = DUPLEX_UNKNOWN;
4855         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4856         tp->link_config.rmt_adv = 0;
4857
4858         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4859                 err = tg3_phy_auxctl_read(tp,
4860                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4861                                           &val);
4862                 if (!err && !(val & (1 << 10))) {
4863                         tg3_phy_auxctl_write(tp,
4864                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4865                                              val | (1 << 10));
4866                         goto relink;
4867                 }
4868         }
4869
4870         bmsr = 0;
4871         for (i = 0; i < 100; i++) {
4872                 tg3_readphy(tp, MII_BMSR, &bmsr);
4873                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4874                     (bmsr & BMSR_LSTATUS))
4875                         break;
4876                 udelay(40);
4877         }
4878
4879         if (bmsr & BMSR_LSTATUS) {
4880                 u32 aux_stat, bmcr;
4881
4882                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4883                 for (i = 0; i < 2000; i++) {
4884                         udelay(10);
4885                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4886                             aux_stat)
4887                                 break;
4888                 }
4889
4890                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4891                                              &current_speed,
4892                                              &current_duplex);
4893
4894                 bmcr = 0;
4895                 for (i = 0; i < 200; i++) {
4896                         tg3_readphy(tp, MII_BMCR, &bmcr);
4897                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4898                                 continue;
4899                         if (bmcr && bmcr != 0x7fff)
4900                                 break;
4901                         udelay(10);
4902                 }
4903
4904                 lcl_adv = 0;
4905                 rmt_adv = 0;
4906
4907                 tp->link_config.active_speed = current_speed;
4908                 tp->link_config.active_duplex = current_duplex;
4909
4910                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4911                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4912
4913                         if ((bmcr & BMCR_ANENABLE) &&
4914                             eee_config_ok &&
4915                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4916                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4917                                 current_link_up = true;
4918
4919                         /* EEE settings changes take effect only after a phy
4920                          * reset.  If we have skipped a reset due to Link Flap
4921                          * Avoidance being enabled, do it now.
4922                          */
4923                         if (!eee_config_ok &&
4924                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4925                             !force_reset) {
4926                                 tg3_setup_eee(tp);
4927                                 tg3_phy_reset(tp);
4928                         }
4929                 } else {
4930                         if (!(bmcr & BMCR_ANENABLE) &&
4931                             tp->link_config.speed == current_speed &&
4932                             tp->link_config.duplex == current_duplex) {
4933                                 current_link_up = true;
4934                         }
4935                 }
4936
4937                 if (current_link_up &&
4938                     tp->link_config.active_duplex == DUPLEX_FULL) {
4939                         u32 reg, bit;
4940
4941                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4942                                 reg = MII_TG3_FET_GEN_STAT;
4943                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4944                         } else {
4945                                 reg = MII_TG3_EXT_STAT;
4946                                 bit = MII_TG3_EXT_STAT_MDIX;
4947                         }
4948
4949                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4950                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4951
4952                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4953                 }
4954         }
4955
4956 relink:
4957         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4958                 tg3_phy_copper_begin(tp);
4959
4960                 if (tg3_flag(tp, ROBOSWITCH)) {
4961                         current_link_up = true;
4962                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4963                         current_speed = SPEED_1000;
4964                         current_duplex = DUPLEX_FULL;
4965                         tp->link_config.active_speed = current_speed;
4966                         tp->link_config.active_duplex = current_duplex;
4967                 }
4968
4969                 tg3_readphy(tp, MII_BMSR, &bmsr);
4970                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4971                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4972                         current_link_up = true;
4973         }
4974
4975         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4976         if (current_link_up) {
4977                 if (tp->link_config.active_speed == SPEED_100 ||
4978                     tp->link_config.active_speed == SPEED_10)
4979                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4980                 else
4981                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4982         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4983                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4984         else
4985                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986
4987         /* In order for the 5750 core in BCM4785 chip to work properly
4988          * in RGMII mode, the Led Control Register must be set up.
4989          */
4990         if (tg3_flag(tp, RGMII_MODE)) {
4991                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4992                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4993
4994                 if (tp->link_config.active_speed == SPEED_10)
4995                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4996                 else if (tp->link_config.active_speed == SPEED_100)
4997                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4998                                      LED_CTRL_100MBPS_ON);
4999                 else if (tp->link_config.active_speed == SPEED_1000)
5000                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5001                                      LED_CTRL_1000MBPS_ON);
5002
5003                 tw32(MAC_LED_CTRL, led_ctrl);
5004                 udelay(40);
5005         }
5006
5007         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5008         if (tp->link_config.active_duplex == DUPLEX_HALF)
5009                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5010
5011         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5012                 if (current_link_up &&
5013                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5014                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5015                 else
5016                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5017         }
5018
5019         /* ??? Without this setting Netgear GA302T PHY does not
5020          * ??? send/receive packets...
5021          */
5022         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5023             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5024                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5025                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5026                 udelay(80);
5027         }
5028
5029         tw32_f(MAC_MODE, tp->mac_mode);
5030         udelay(40);
5031
5032         tg3_phy_eee_adjust(tp, current_link_up);
5033
5034         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5035                 /* Polled via timer. */
5036                 tw32_f(MAC_EVENT, 0);
5037         } else {
5038                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5039         }
5040         udelay(40);
5041
5042         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5043             current_link_up &&
5044             tp->link_config.active_speed == SPEED_1000 &&
5045             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5046                 udelay(120);
5047                 tw32_f(MAC_STATUS,
5048                      (MAC_STATUS_SYNC_CHANGED |
5049                       MAC_STATUS_CFG_CHANGED));
5050                 udelay(40);
5051                 tg3_write_mem(tp,
5052                               NIC_SRAM_FIRMWARE_MBOX,
5053                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5054         }
5055
5056         /* Prevent send BD corruption. */
5057         if (tg3_flag(tp, CLKREQ_BUG)) {
5058                 if (tp->link_config.active_speed == SPEED_100 ||
5059                     tp->link_config.active_speed == SPEED_10)
5060                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5061                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5062                 else
5063                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5064                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5065         }
5066
5067         tg3_test_and_report_link_chg(tp, current_link_up);
5068
5069         return 0;
5070 }
5071
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine implemented by tg3_fiber_aneg_smachine().  The MR_* flag
 * bits mirror the "mr_*" management variables of the IEEE 802.3
 * clause 37 arbitration process; the ANEG_CFG_* bits decode the
 * raw tx/rx configuration words exchanged on the wire.
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* status/result bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040	/* link partner advertised bits */
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters: cur_time is bumped on every state-machine
	 * invocation; link_time records when the current wait began.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive identical rx configs */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* raw config words sent/received */
/* NOTE(review): bit positions below match the hardware config-word
 * register layout, which is why e.g. PS1/PS2 are not adjacent bits.
 */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0	/* keep ticking the state machine */
#define ANEG_DONE	1	/* negotiation resolved */
#define ANEG_TIMER_ENAB	2	/* in a timed settle interval; keep ticking */
#define ANEG_FAILED	-1	/* invalid config word or protocol failure */

/* Ticks (state-machine invocations) a state must settle before advancing. */
#define ANEG_STATE_SETTLE_TIME	10000
5135
/* One tick of the software 1000BASE-X autonegotiation state machine
 * (modeled on the IEEE 802.3 clause 37 arbitration process).  Called
 * repeatedly by fiber_autoneg() with @ap holding all persistent state.
 *
 * Returns ANEG_OK to continue, ANEG_TIMER_ENAB while waiting out a
 * settle interval, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all match/timing state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word (if any) and update the
	 * ability/ack/idle match indicators.  ability_match is set only
	 * after the same non-idle config word is seen more than once.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received: link partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Build and start transmitting our ability word:
		 * full duplex plus the advertised pause capabilities.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until a stable, non-zero ability word is received. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's ability word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Partner acked; make sure the word (minus the ack
			 * bit) still matches what we matched on earlier,
			 * otherwise restart the whole exchange.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			/* Reserved bits set in the config word: give up. */
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's advertised abilities into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 appears to be the toggle bit of the
		 * received config word (paired with MR_TOGGLE_RX) — confirm
		 * against the clause 37 next-page definitions.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented (see
				 * the NEXT_PAGE_WAIT states below), so fail
				 * unless neither side wants next pages.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for the link to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5387
5388 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5389 {
5390         int res = 0;
5391         struct tg3_fiber_aneginfo aninfo;
5392         int status = ANEG_FAILED;
5393         unsigned int tick;
5394         u32 tmp;
5395
5396         tw32_f(MAC_TX_AUTO_NEG, 0);
5397
5398         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5399         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5400         udelay(40);
5401
5402         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5403         udelay(40);
5404
5405         memset(&aninfo, 0, sizeof(aninfo));
5406         aninfo.flags |= MR_AN_ENABLE;
5407         aninfo.state = ANEG_STATE_UNKNOWN;
5408         aninfo.cur_time = 0;
5409         tick = 0;
5410         while (++tick < 195000) {
5411                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5412                 if (status == ANEG_DONE || status == ANEG_FAILED)
5413                         break;
5414
5415                 udelay(1);
5416         }
5417
5418         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5419         tw32_f(MAC_MODE, tp->mac_mode);
5420         udelay(40);
5421
5422         *txflags = aninfo.txconfig;
5423         *rxflags = aninfo.flags;
5424
5425         if (status == ANEG_DONE &&
5426             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5427                              MR_LP_ADV_FULL_DUPLEX)))
5428                 res = 1;
5429
5430         return res;
5431 }
5432
5433 static void tg3_init_bcm8002(struct tg3 *tp)
5434 {
5435         u32 mac_status = tr32(MAC_STATUS);
5436         int i;
5437
5438         /* Reset when initting first time or we have a link. */
5439         if (tg3_flag(tp, INIT_COMPLETE) &&
5440             !(mac_status & MAC_STATUS_PCS_SYNCED))
5441                 return;
5442
5443         /* Set PLL lock range. */
5444         tg3_writephy(tp, 0x16, 0x8007);
5445
5446         /* SW reset */
5447         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5448
5449         /* Wait for reset to complete. */
5450         /* XXX schedule_timeout() ... */
5451         for (i = 0; i < 500; i++)
5452                 udelay(10);
5453
5454         /* Config mode; select PMA/Ch 1 regs. */
5455         tg3_writephy(tp, 0x10, 0x8411);
5456
5457         /* Enable auto-lock and comdet, select txclk for tx. */
5458         tg3_writephy(tp, 0x11, 0x0a10);
5459
5460         tg3_writephy(tp, 0x18, 0x00a0);
5461         tg3_writephy(tp, 0x16, 0x41ff);
5462
5463         /* Assert and deassert POR. */
5464         tg3_writephy(tp, 0x13, 0x0400);
5465         udelay(40);
5466         tg3_writephy(tp, 0x13, 0x0000);
5467
5468         tg3_writephy(tp, 0x11, 0x0a50);
5469         udelay(40);
5470         tg3_writephy(tp, 0x11, 0x0a10);
5471
5472         /* Wait for signal to stabilize */
5473         /* XXX schedule_timeout() ... */
5474         for (i = 0; i < 15000; i++)
5475                 udelay(10);
5476
5477         /* Deselect the channel register so we can read the PHYID
5478          * later.
5479          */
5480         tg3_writephy(tp, 0x10, 0x8011);
5481 }
5482
/* Link setup for fiber devices whose SG_DIG block performs
 * autonegotiation in hardware.  @mac_status is the caller's snapshot
 * of MAC_STATUS.  Returns true when link is up.
 *
 * NOTE(review): the 0xc010000/0x4010000/0xc011000 MAC_SERDES_CFG
 * values and the chip-rev gate suggest a 5704-family SerDes
 * workaround — the exact bit meanings are not visible here.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* All revs other than 5704 A0/A1 need the SerDes workaround. */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable hardware autoneg if it is on,
		 * then report link up purely on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Keep a parallel-detected link alive (counting down
		 * serdes_counter) while PCS is synced and no config
		 * words are arriving; otherwise (re)start hw autoneg.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive pause/flow-control
			 * settings from both sides' advertisements.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop back to
				 * common setup and try parallel
				 * detection before restarting.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5627
5628 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5629 {
5630         bool current_link_up = false;
5631
5632         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5633                 goto out;
5634
5635         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5636                 u32 txflags, rxflags;
5637                 int i;
5638
5639                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5640                         u32 local_adv = 0, remote_adv = 0;
5641
5642                         if (txflags & ANEG_CFG_PS1)
5643                                 local_adv |= ADVERTISE_1000XPAUSE;
5644                         if (txflags & ANEG_CFG_PS2)
5645                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5646
5647                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5648                                 remote_adv |= LPA_1000XPAUSE;
5649                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5650                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5651
5652                         tp->link_config.rmt_adv =
5653                                            mii_adv_to_ethtool_adv_x(remote_adv);
5654
5655                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5656
5657                         current_link_up = true;
5658                 }
5659                 for (i = 0; i < 30; i++) {
5660                         udelay(20);
5661                         tw32_f(MAC_STATUS,
5662                                (MAC_STATUS_SYNC_CHANGED |
5663                                 MAC_STATUS_CFG_CHANGED));
5664                         udelay(40);
5665                         if ((tr32(MAC_STATUS) &
5666                              (MAC_STATUS_SYNC_CHANGED |
5667                               MAC_STATUS_CFG_CHANGED)) == 0)
5668                                 break;
5669                 }
5670
5671                 mac_status = tr32(MAC_STATUS);
5672                 if (!current_link_up &&
5673                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5674                     !(mac_status & MAC_STATUS_RCVD_CFG))
5675                         current_link_up = true;
5676         } else {
5677                 tg3_setup_flow_control(tp, 0, 0);
5678
5679                 /* Forcing 1000FD link up. */
5680                 current_link_up = true;
5681
5682                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5683                 udelay(40);
5684
5685                 tw32_f(MAC_MODE, tp->mac_mode);
5686                 udelay(40);
5687         }
5688
5689 out:
5690         return current_link_up;
5691 }
5692
/* Bring up / re-evaluate the link on a TBI (ten-bit interface) fiber PHY.
 *
 * Puts the MAC in TBI port mode, runs either hardware or software fiber
 * autonegotiation via the helpers below, then updates LED state and the
 * cached link parameters, reporting a link change if anything differed.
 *
 * @tp:          device instance
 * @force_reset: unused on this path; kept for signature parity with the
 *               other tg3_setup_*_phy() routines
 *
 * Returns 0 always (this path has no failure mode to report).
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot the current link parameters so we can tell at the end
	 * whether anything actually changed and needs reporting.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already up and init complete.
	 * If the MAC reports a clean, synced link with no pending config
	 * change, just ack the change bits and leave everything alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	/* Run the appropriate fiber autoneg flavor for this chip. */
	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block so the next
	 * interrupt reflects the state we are about to establish.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config change bits until the MAC stops reasserting
	 * them (bounded at 100 iterations, 5us apart).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to restart negotiation with
			 * the link partner.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000/full when up; drive the link LED
	 * to match.
	 */
	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when speed,
	 * duplex or flow control differ from the snapshot taken above.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5795
/* Bring up / re-evaluate the link on a fiber PHY reached through the MII
 * management interface (as opposed to the raw TBI path above).
 *
 * On 5719/5720 the SerDes may be strapped into SGMII mode; that case is
 * handled first by decoding speed/duplex straight out of the SerDes
 * status register.  Otherwise the standard MII autoneg / forced-mode
 * dance is performed over BMCR/BMSR/ADVERTISE.
 *
 * @tp:          device instance
 * @force_reset: reset the PHY before configuring it
 *
 * Returns accumulated tg3_readphy() error status (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	/* SGMII short-circuit: on 5719/5720 read the SerDes status and,
	 * if the part is in SGMII mode, derive link/speed/duplex directly
	 * without touching the MII autoneg registers.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	     (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			/* Map SerDes speed bits to MAC port mode:
			 * 1000 -> GMII, 100/10 -> MII.
			 */
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	/* Standard (1000Base-X over MII) path. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice so the second read
	 * reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * requested flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg and return early; the serdes counter
			 * gives it time to complete before re-evaluation.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: AN off, duplex from link_config. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Withdraw all 1000X modes and restart AN
				 * so the partner drops the link cleanly.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched BMSR (twice, as above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5989
/* Periodic SerDes parallel-detection poll, called while autoneg is
 * enabled.  Two directions:
 *
 *  - Link down after the autoneg grace period expired: if the PHY sees
 *    signal but no config code words, the partner is not negotiating;
 *    force 1000/full and mark parallel-detect active.
 *  - Link up via parallel detect: if config code words start arriving,
 *    the partner began negotiating; re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice — NOTE(review): presumably the first
			 * read clears a latched value; confirm against the
			 * PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6049
/* Top-level PHY setup: dispatch to the fiber/MII-fiber/copper routine
 * for this PHY type, then apply link-state-dependent MAC and PCIe
 * housekeeping (clock prescaler, TX slot time, stats coalescing, ASPM
 * threshold).
 *
 * @tp:          device instance
 * @force_reset: passed through to the per-type setup routine
 *
 * Returns the error code from the dispatched setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* 5784 A-step: reprogram the GRC prescaler to match the current
	 * MAC clock frequency.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762: preserve the jumbo-frame and count-down fields. */
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit needs the extended (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: only coalesce statistics while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: relax the L1 entry threshold while the link
	 * is down, saturate it while up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6115
6116 /* tp->lock must be held */
6117 static u64 tg3_refclk_read(struct tg3 *tp)
6118 {
6119         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6120         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6121 }
6122
6123 /* tp->lock must be held */
6124 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6125 {
6126         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6127
6128         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6129         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6130         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6131         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6132 }
6133
6134 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6135 static inline void tg3_full_unlock(struct tg3 *tp);
6136 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6137 {
6138         struct tg3 *tp = netdev_priv(dev);
6139
6140         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6141                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6142                                 SOF_TIMESTAMPING_SOFTWARE;
6143
6144         if (tg3_flag(tp, PTP_CAPABLE)) {
6145                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6146                                         SOF_TIMESTAMPING_RX_HARDWARE |
6147                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6148         }
6149
6150         if (tp->ptp_clock)
6151                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6152         else
6153                 info->phc_index = -1;
6154
6155         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6156
6157         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6158                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6159                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6160                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6161         return 0;
6162 }
6163
/* PTP adjfreq callback: program the hardware frequency correction from a
 * parts-per-billion adjustment.
 *
 * NOTE(review): ppb is presumably already clamped to +/-max_adj
 * (250000000) by the PTP core before this is called, so the negation
 * below cannot overflow — confirm against the ptp_clock core.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Hardware takes a magnitude plus a direction flag. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	/* A zero correction disables the correction machinery entirely. */
	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
6199
/* PTP adjtime callback: phase adjustments are accumulated in software
 * (tp->ptp_adjust) and folded in when timestamps are read, rather than
 * rewriting the hardware counter.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
6210
6211 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6212 {
6213         u64 ns;
6214         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215
6216         tg3_full_lock(tp, 0);
6217         ns = tg3_refclk_read(tp);
6218         ns += tp->ptp_adjust;
6219         tg3_full_unlock(tp);
6220
6221         *ts = ns_to_timespec64(ns);
6222
6223         return 0;
6224 }
6225
6226 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6227                            const struct timespec64 *ts)
6228 {
6229         u64 ns;
6230         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6231
6232         ns = timespec64_to_ns(ts);
6233
6234         tg3_full_lock(tp, 0);
6235         tg3_refclk_write(tp, ns);
6236         tp->ptp_adjust = 0;
6237         tg3_full_unlock(tp);
6238
6239         return 0;
6240 }
6241
/* PTP enable callback.  Only PTP_CLK_REQ_PEROUT on index 0 is handled,
 * and the hardware supports only a one-shot pulse (period must be 0)
 * driven by watchdog 0.  Everything else returns -EOPNOTSUPP.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Only a single timesync output (index 0) exists. */
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			/* Absolute trigger time in nanoseconds. */
			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* The watchdog MSB register cannot hold bit 63. */
			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* Arm watchdog 0 with the trigger time and route
			 * it to the timesync GPIO.
			 */
			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			/* Disarm the watchdog and detach the GPIO. */
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
6300
/* PTP clock capabilities template, copied into tp->ptp_info at init.
 * max_adj is the largest frequency adjustment (ppb) accepted by
 * tg3_ptp_adjfreq(); one periodic (one-shot) output pin is exposed.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6316
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318                                      struct skb_shared_hwtstamps *timestamp)
6319 {
6320         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322                                            tp->ptp_adjust);
6323 }
6324
6325 /* tp->lock must be held */
/* tp->lock must be held.
 * Seed the hardware reference clock from wall-clock time, clear the
 * software phase offset, and install the capabilities template.  No-op
 * on chips without PTP support.
 */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
6336
6337 /* tp->lock must be held */
/* tp->lock must be held.
 * After resume, reload the hardware clock from wall-clock time plus any
 * phase offset accumulated before suspend, then clear the offset (it is
 * now baked into the hardware counter).
 */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
6346
/* Unregister the PTP clock, if one was registered, and reset the
 * associated state.  Safe to call when no clock exists.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
6356
/* Return nonzero while interrupts are being synchronized/quiesced
 * (tp->irq_sync), so pollers can back off.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6361
/* Copy 'len' bytes of registers starting at register offset 'off' into
 * the buffer 'dst'.  Note the buffer pointer is advanced by 'off' bytes
 * first, so each register lands at the same byte offset in the buffer
 * as it has in register space — callers pass the buffer base for every
 * range.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
6370
/* Dump the legacy (non-PCI-Express-mapped) register blocks into 'regs'.
 * Each tg3_rd32_loop() call names a hardware block's base offset and the
 * byte length of its register window; the offsets/lengths must match
 * the chip's register map exactly, so do not reorder or "clean up"
 * these values.  Buffer layout mirrors register space (see
 * tg3_rd32_loop).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The TX CPU was removed on 5705 and later parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6420
/* Dump device state to the kernel log for debugging: a register dump
 * (skipping all-zero groups of four), then the host status block and
 * NAPI bookkeeping for every interrupt vector.  Uses GFP_ATOMIC since
 * this can run from error paths; silently returns if the scratch
 * buffer cannot be allocated.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping lines that are all
	 * zero to keep the log readable.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6476
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (or mailbox writes
	 * already go through the indirect path), this failure mode should
	 * be impossible -- anything else is a driver bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The actual chip reset happens later, from the reset task. */
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6496
6497 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6498 {
6499         /* Tell compiler to fetch tx indices from memory. */
6500         barrier();
6501         return tnapi->tx_pending -
6502                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6503 }
6504
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim completed tx descriptors for one NAPI vector: unmap each
 * completed packet's head and fragments, deliver any hardware tx
 * timestamp, free the skb, and wake the tx queue if it was stopped
 * and enough descriptors are now free.  On inconsistent ring state
 * (suspected MMIO reordering) tg3_tx_recover() is invoked and the
 * function bails out without advancing tx_cons.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the tx queue number is one less than the vector
	 * index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A "completed" slot with no skb means the hardware and
		 * software indices disagree -- trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware tx timestamp if this descriptor
		 * requested one.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the packet. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip the extra BDs used when a single mapping had to be
		 * split across multiple descriptors.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap every page fragment, again skipping split BDs. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Walking past hw_idx or onto an occupied slot
			 * inside one packet indicates ring corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the byte queue limits machinery. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx queue lock to avoid racing with a
		 * concurrent queue stop in the transmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6608
6609 static void tg3_frag_free(bool is_frag, void *data)
6610 {
6611         if (is_frag)
6612                 skb_free_frag(data);
6613         else
6614                 kfree(data);
6615 }
6616
6617 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6618 {
6619         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6620                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6621
6622         if (!ri->data)
6623                 return;
6624
6625         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6626                          map_sz, PCI_DMA_FROMDEVICE);
6627         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6628         ri->data = NULL;
6629 }
6630
6631
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the ring (standard or jumbo) named by the opaque key. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Small buffers come from the page-fragment allocator; anything
	 * larger than a page falls back to kmalloc().  *frag_size records
	 * which allocator was used so build_skb()/tg3_frag_free() match.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit point: publish the new buffer and its DMA address. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6707
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Re-post an already-mapped rx buffer from the source producer ring
 * (always napi[0]'s) to a destination slot, instead of allocating a
 * fresh one.  Used for dropped packets and for small packets whose
 * payload was copied out.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Locate source and destination slots in the ring named by the
	 * opaque key (standard or jumbo).
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer the buffer pointer, DMA cookie, and BD address. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6757
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
/* Process up to @budget received packets on this vector's status ring
 * and return how many were delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Work on local copies of the producer indices; they are
	 * published back to tpr only when refilling below.
	 */
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies which producer ring (and
		 * which slot in it) this completion refers to.  Buffers
		 * always live in napi[0]'s producer set.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Hardware flagged an error -- recycle the buffer and
		 * count the drop.
		 */
		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* The hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the rx hardware timestamp for PTP packets. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large packet: allocate a replacement buffer and
			 * hand the received one to the stack directly.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			/* Small packet: copy the payload into a fresh skb
			 * and re-post the original buffer unchanged.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Accept the hardware checksum only when RXCSUM is
		 * enabled and the chip reports a valid TCP/UDP csum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop over-MTU frames unless they are VLAN-tagged (the
		 * tag accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically push the standard producer index to the
		 * chip so it never runs out of posted buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] transfers the refilled buffers back to
		 * the hardware-visible ring; kick it to do so.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6987
/* Check the status block for a link-change event and, if one is
 * pending, acknowledge it and re-run PHY setup (or just clear the MAC
 * status bits when phylib owns the PHY).  Runs from the NAPI poll
 * loop; skipped entirely when link changes are detected by register
 * polling instead of the status block.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * rest of the status word.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib handles the link state; just ack
				 * the MAC status bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
7011
/* Transfer posted rx buffers from a source producer ring set @spr to
 * the destination set @dpr (the hardware-visible one), for both the
 * standard and jumbo rings.  Used in RSS mode, where per-vector rings
 * stage refilled buffers that must be merged back into napi[0]'s ring.
 * Returns 0 on success or -ENOSPC if a destination slot was still
 * occupied, in which case only the free slots were filled.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Drain the source standard ring in contiguous chunks. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy at most up to the ring wrap point in one pass. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot that still holds a
		 * buffer; report -ENOSPC but keep what fits.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Move the buffer bookkeeping and the BD DMA addresses. */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same procedure for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
7137
/* One iteration of NAPI work for a vector: reclaim tx completions,
 * process rx within the remaining budget, and -- on the RSS refill
 * vector (napi[1]) -- merge all per-vector producer rings back into
 * napi[0]'s hardware-visible ring and update the producer mailboxes.
 * Returns the cumulative work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A pending tx recovery means the ring state is suspect;
		 * stop processing until the reset task runs.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an rx return ring have nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		/* Pull staged buffers from every rx vector's ring. */
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the BD updates visible before telling the chip. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (destination slot occupied); force a
		 * coalescing tick so the refill is retried soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7188
7189 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7190 {
7191         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7192                 schedule_work(&tp->reset_task);
7193 }
7194
7195 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7196 {
7197         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7198                 cancel_work_sync(&tp->reset_task);
7199         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7200 }
7201
/* NAPI poll handler for the extra MSI-X vectors (tagged-status mode).
 * Loops calling tg3_poll_work() until the budget is exhausted or no
 * further RX/TX work is visible, then re-enables this vector's
 * interrupt through its mailbox.  Registered for tp->napi[1..] by
 * tg3_napi_init().
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;
                rmb();

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

                        /* This test here is not race free, but will reduce
                         * the number of interrupts by looping again.
                         */
                        if (tnapi == &tp->napi[1] && tp->rx_refill)
                                continue;

                        napi_complete_done(napi, work_done);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

                        /* This test here is synchronized by napi_schedule()
                         * and napi_complete() to close the race condition.
                         */
                        if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
                                tw32(HOSTCC_MODE, tp->coalesce_mode |
                                                  HOSTCC_MODE_ENABLE |
                                                  tnapi->coal_now);
                        }
                        mmiowb();
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
7261
/* Inspect the error sources behind SD_STATUS_ERROR and, if a real
 * (non-spurious) condition is found, dump chip state and schedule a
 * reset.  The ERROR_PROCESSED flag makes this run at most once per
 * error episode; presumably it is cleared again by the reset path --
 * TODO(review): confirm against the reset task.
 */
static void tg3_process_error(struct tg3 *tp)
{
        u32 val;
        bool real_error = false;

        if (tg3_flag(tp, ERROR_PROCESSED))
                return;

        /* Check Flow Attention register */
        val = tr32(HOSTCC_FLOW_ATTN);
        if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
                netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
                netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
                netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
                real_error = true;
        }

        /* Only the masked-out low-water-mark / MSI-request bits fired:
         * nothing that warrants a reset.
         */
        if (!real_error)
                return;

        tg3_dump_state(tp);

        tg3_flag_set(tp, ERROR_PROCESSED);
        tg3_reset_task_schedule(tp);
}
7295
/* NAPI poll handler registered for tp->napi[0] (see tg3_napi_init()).
 * In addition to the RX/TX work done by tg3_poll_work(), this vector
 * also processes chip errors and link-state changes, and handles both
 * tagged and non-tagged status-block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                tg3_poll_link(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete_done(napi, work_done);
                        tg3_int_reenable(tnapi);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
7343
7344 static void tg3_napi_disable(struct tg3 *tp)
7345 {
7346         int i;
7347
7348         for (i = tp->irq_cnt - 1; i >= 0; i--)
7349                 napi_disable(&tp->napi[i].napi);
7350 }
7351
7352 static void tg3_napi_enable(struct tg3 *tp)
7353 {
7354         int i;
7355
7356         for (i = 0; i < tp->irq_cnt; i++)
7357                 napi_enable(&tp->napi[i].napi);
7358 }
7359
7360 static void tg3_napi_init(struct tg3 *tp)
7361 {
7362         int i;
7363
7364         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7365         for (i = 1; i < tp->irq_cnt; i++)
7366                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7367 }
7368
7369 static void tg3_napi_fini(struct tg3 *tp)
7370 {
7371         int i;
7372
7373         for (i = 0; i < tp->irq_cnt; i++)
7374                 netif_napi_del(&tp->napi[i].napi);
7375 }
7376
/* Quiesce the netdev: stop NAPI processing and all TX queues.
 * trans_start is refreshed first so the stack's watchdog does not
 * report a TX timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_carrier_off(tp->dev);
        netif_tx_disable(tp->dev);
}
7384
/* Restart TX queues, NAPI and interrupts after a reset or
 * reconfiguration.  tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        tg3_ptp_resume(tp);

        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        if (tp->link_up)
                netif_carrier_on(tp->dev);

        tg3_napi_enable(tp);
        /* Mark the status block updated before re-enabling interrupts
         * so the first poll sees any pending work.
         */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
7403
/* Wait for all in-flight interrupt handlers to finish.  Setting
 * irq_sync makes handlers that check tg3_irq_sync() bail out early;
 * tp->lock is dropped across synchronize_irq() because that call may
 * block, and is re-acquired before returning (see the sparse
 * annotations below).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Publish irq_sync before waiting on the handlers. */
        smp_mb();

        spin_unlock_bh(&tp->lock);

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        spin_lock_bh(&tp->lock);
}
7422
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
7434
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
7439
7440 /* One-shot MSI handler - Chip automatically disables interrupt
7441  * after sending MSI so driver doesn't have to do it.
7442  */
7443 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7444 {
7445         struct tg3_napi *tnapi = dev_id;
7446         struct tg3 *tp = tnapi->tp;
7447
7448         prefetch(tnapi->hw_status);
7449         if (tnapi->rx_rcb)
7450                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7451
7452         if (likely(!tg3_irq_sync(tp)))
7453                 napi_schedule(&tnapi->napi);
7454
7455         return IRQ_HANDLED;
7456 }
7457
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm up the cache lines the poll handler will touch. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(tnapi->int_mbox, 0x00000001);
        /* Don't schedule while tg3_irq_quiesce() is draining us. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_RETVAL(1);
}
7483
/* Shared INTx interrupt handler (non-tagged status mode).  Confirms
 * the interrupt is ours, acks it via the interrupt mailbox, and
 * schedules NAPI if the status block shows work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
7532
/* Shared INTx interrupt handler for tagged-status mode.  Uses the
 * status tag (instead of SD_STATUS_UPDATED) to decide whether the
 * interrupt carries new work, then acks the IRQ and schedules NAPI.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt (or chip is resetting). */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
7584
7585 /* ISR for interrupt test */
7586 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7587 {
7588         struct tg3_napi *tnapi = dev_id;
7589         struct tg3 *tp = tnapi->tp;
7590         struct tg3_hw_status *sblk = tnapi->hw_status;
7591
7592         if ((sblk->status & SD_STATUS_UPDATED) ||
7593             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7594                 tg3_disable_ints(tp);
7595                 return IRQ_RETVAL(1);
7596         }
7597         return IRQ_RETVAL(0);
7598 }
7599
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: emulate an interrupt on every vector so pending
 * work gets processed even with interrupts unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        /* Nothing to do while tg3_irq_quiesce() is draining handlers. */
        if (tg3_irq_sync(tp))
                return;

        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
7613
7614 static void tg3_tx_timeout(struct net_device *dev)
7615 {
7616         struct tg3 *tp = netdev_priv(dev);
7617
7618         if (netif_msg_tx_err(tp)) {
7619                 netdev_err(dev, "transmit timed out, resetting\n");
7620                 tg3_dump_state(tp);
7621         }
7622
7623         tg3_reset_task_schedule(tp);
7624 }
7625
7626 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7627 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7628 {
7629         u32 base = (u32) mapping & 0xffffffff;
7630
7631         return base + len + 8 < base;
7632 }
7633
7634 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7635  * of any 4GB boundaries: 4G, 8G, etc
7636  */
7637 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7638                                            u32 len, u32 mss)
7639 {
7640         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7641                 u32 base = (u32) mapping & 0xffffffff;
7642
7643                 return ((base + len + (mss & 0x3fff)) < base);
7644         }
7645         return 0;
7646 }
7647
7648 /* Test for DMA addresses > 40-bit */
7649 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7650                                           int len)
7651 {
7652 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7653         if (tg3_flag(tp, 40BIT_DMA_BUG))
7654                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7655         return 0;
7656 #else
7657         return 0;
7658 #endif
7659 }
7660
7661 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7662                                  dma_addr_t mapping, u32 len, u32 flags,
7663                                  u32 mss, u32 vlan)
7664 {
7665         txbd->addr_hi = ((u64) mapping >> 32);
7666         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7667         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7668         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7669 }
7670
/* Queue one DMA mapping as one or more TX BDs starting at *entry,
 * splitting it at tp->dma_limit boundaries when required.  Advances
 * *entry and decrements *budget for each BD consumed.  Returns true if
 * a hardware bug condition was detected, in which case the caller must
 * fall back to the bounce-copy workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        bool hwbug = false;

        /* Chips with the short-DMA erratum cannot handle <= 8 byte BDs. */
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                hwbug = true;

        if (tg3_4g_overflow_test(map, len))
                hwbug = true;

        if (tg3_4g_tso_overflow_test(tp, map, len, mss))
                hwbug = true;

        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                /* Only the final piece may carry TXD_FLAG_END. */
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;
                        }

                        /* Mark this slot as a split piece so unmap can
                         * walk past it (see tg3_tx_skb_unmap()).
                         */
                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);
                        *budget -= 1;
                        prvidx = *entry;
                        *entry = NEXT_TX(*entry);

                        map += frag_len;
                }

                if (len) {
                        if (*budget) {
                                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                              len, flags, mss, vlan);
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
                                /* Out of descriptors: undo the split mark
                                 * on the last queued piece and bail out.
                                 */
                                hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
        } else {
                /* No DMA-size limit: a single BD covers the mapping. */
                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
        }

        return hwbug;
}
7733
/* Undo the DMA mappings of the skb anchored at TX ring slot @entry:
 * the linear head first, then @last + 1 page fragments.  Slots marked
 * 'fragmented' were created by tg3_tx_frag_set() splitting a single
 * mapping across several BDs; they share one unmap and are skipped
 * over.  Clears the slot's skb pointer (caller still owns the skb).
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
        int i;
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        skb = txb->skb;
        txb->skb = NULL;

        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        /* Skip split pieces belonging to the head mapping. */
        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
        }

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                /* Skip split pieces belonging to this fragment. */
                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
                }
        }
}
7771
/* Workaround 4GB and 40-bit hardware DMA bugs by bounce-copying the skb
 * into a fresh linear buffer and queueing that instead.  On 5701 the
 * copy also realigns the data to a 4-byte boundary.  Returns 0 on
 * success, -1 on failure.  The original skb is always consumed; *pskb
 * is set to the new skb (NULL if allocation failed).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (tg3_asic_rev(tp) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701: copy with extra headroom to 4-byte-align data. */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb_any(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        /* Still hits a hwbug (or ran out of BDs): undo
                         * everything queued since save_entry and fail.
                         */
                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb_any(new_skb);
                                ret = -1;
                        }
                }
        }

        /* The original skb is consumed in every outcome. */
        dev_kfree_skb_any(skb);
        *pskb = new_skb;
        return ret;
}
7826
7827 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7828 {
7829         /* Check if we will never have enough descriptors,
7830          * as gso_segs can be more than current ring size
7831          */
7832         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7833 }
7834
7835 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7836
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set().  Software-segments the skb and
 * resubmits each segment through tg3_start_xmit().  The original skb
 * is always consumed.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
                       struct netdev_queue *txq, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * checking tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                /* Room freed up while we were stopping: resume. */
                netif_tx_wake_queue(txq);
        }

        /* Segment in software with TSO features masked off. */
        segs = skb_gso_segment(skb, tp->dev->features &
                                    ~(NETIF_F_TSO | NETIF_F_TSO6));
        if (IS_ERR(segs) || !segs)
                goto tg3_tso_bug_end;

        /* Transmit each segment individually. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}
7879
7880 /* hard_start_xmit for all devices */
7881 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7882 {
7883         struct tg3 *tp = netdev_priv(dev);
7884         u32 len, entry, base_flags, mss, vlan = 0;
7885         u32 budget;
7886         int i = -1, would_hit_hwbug;
7887         dma_addr_t mapping;
7888         struct tg3_napi *tnapi;
7889         struct netdev_queue *txq;
7890         unsigned int last;
7891         struct iphdr *iph = NULL;
7892         struct tcphdr *tcph = NULL;
7893         __sum16 tcp_csum = 0, ip_csum = 0;
7894         __be16 ip_tot_len = 0;
7895
7896         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7897         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7898         if (tg3_flag(tp, ENABLE_TSS))
7899                 tnapi++;
7900
7901         budget = tg3_tx_avail(tnapi);
7902
7903         /* We are running in BH disabled context with netif_tx_lock
7904          * and TX reclaim runs via tp->napi.poll inside of a software
7905          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7906          * no IRQ context deadlocks to worry about either.  Rejoice!
7907          */
7908         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7909                 if (!netif_tx_queue_stopped(txq)) {
7910                         netif_tx_stop_queue(txq);
7911
7912                         /* This is a hard error, log it. */
7913                         netdev_err(dev,
7914                                    "BUG! Tx Ring full when queue awake!\n");
7915                 }
7916                 return NETDEV_TX_BUSY;
7917         }
7918
7919         entry = tnapi->tx_prod;
7920         base_flags = 0;
7921
7922         mss = skb_shinfo(skb)->gso_size;
7923         if (mss) {
7924                 u32 tcp_opt_len, hdr_len;
7925
7926                 if (skb_cow_head(skb, 0))
7927                         goto drop;
7928
7929                 iph = ip_hdr(skb);
7930                 tcp_opt_len = tcp_optlen(skb);
7931
7932                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7933
7934                 /* HW/FW can not correctly segment packets that have been
7935                  * vlan encapsulated.
7936                  */
7937                 if (skb->protocol == htons(ETH_P_8021Q) ||
7938                     skb->protocol == htons(ETH_P_8021AD)) {
7939                         if (tg3_tso_bug_gso_check(tnapi, skb))
7940                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7941                         goto drop;
7942                 }
7943
7944                 if (!skb_is_gso_v6(skb)) {
7945                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7946                             tg3_flag(tp, TSO_BUG)) {
7947                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7948                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7949                                 goto drop;
7950                         }
7951                         ip_csum = iph->check;
7952                         ip_tot_len = iph->tot_len;
7953                         iph->check = 0;
7954                         iph->tot_len = htons(mss + hdr_len);
7955                 }
7956
7957                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7958                                TXD_FLAG_CPU_POST_DMA);
7959
7960                 tcph = tcp_hdr(skb);
7961                 tcp_csum = tcph->check;
7962
7963                 if (tg3_flag(tp, HW_TSO_1) ||
7964                     tg3_flag(tp, HW_TSO_2) ||
7965                     tg3_flag(tp, HW_TSO_3)) {
7966                         tcph->check = 0;
7967                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7968                 } else {
7969                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7970                                                          0, IPPROTO_TCP, 0);
7971                 }
7972
7973                 if (tg3_flag(tp, HW_TSO_3)) {
7974                         mss |= (hdr_len & 0xc) << 12;
7975                         if (hdr_len & 0x10)
7976                                 base_flags |= 0x00000010;
7977                         base_flags |= (hdr_len & 0x3e0) << 5;
7978                 } else if (tg3_flag(tp, HW_TSO_2))
7979                         mss |= hdr_len << 9;
7980                 else if (tg3_flag(tp, HW_TSO_1) ||
7981                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7982                         if (tcp_opt_len || iph->ihl > 5) {
7983                                 int tsflags;
7984
7985                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7986                                 mss |= (tsflags << 11);
7987                         }
7988                 } else {
7989                         if (tcp_opt_len || iph->ihl > 5) {
7990                                 int tsflags;
7991
7992                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993                                 base_flags |= tsflags << 12;
7994                         }
7995                 }
7996         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7997                 /* HW/FW can not correctly checksum packets that have been
7998                  * vlan encapsulated.
7999                  */
8000                 if (skb->protocol == htons(ETH_P_8021Q) ||
8001                     skb->protocol == htons(ETH_P_8021AD)) {
8002                         if (skb_checksum_help(skb))
8003                                 goto drop;
8004                 } else  {
8005                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8006                 }
8007         }
8008
8009         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8010             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8011                 base_flags |= TXD_FLAG_JMB_PKT;
8012
8013         if (skb_vlan_tag_present(skb)) {
8014                 base_flags |= TXD_FLAG_VLAN;
8015                 vlan = skb_vlan_tag_get(skb);
8016         }
8017
8018         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8019             tg3_flag(tp, TX_TSTAMP_EN)) {
8020                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8021                 base_flags |= TXD_FLAG_HWTSTAMP;
8022         }
8023
8024         len = skb_headlen(skb);
8025
8026         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8027         if (pci_dma_mapping_error(tp->pdev, mapping))
8028                 goto drop;
8029
8030
8031         tnapi->tx_buffers[entry].skb = skb;
8032         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8033
8034         would_hit_hwbug = 0;
8035
8036         if (tg3_flag(tp, 5701_DMA_BUG))
8037                 would_hit_hwbug = 1;
8038
8039         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8040                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8041                             mss, vlan)) {
8042                 would_hit_hwbug = 1;
8043         } else if (skb_shinfo(skb)->nr_frags > 0) {
8044                 u32 tmp_mss = mss;
8045
8046                 if (!tg3_flag(tp, HW_TSO_1) &&
8047                     !tg3_flag(tp, HW_TSO_2) &&
8048                     !tg3_flag(tp, HW_TSO_3))
8049                         tmp_mss = 0;
8050
8051                 /* Now loop through additional data
8052                  * fragments, and queue them.
8053                  */
8054                 last = skb_shinfo(skb)->nr_frags - 1;
8055                 for (i = 0; i <= last; i++) {
8056                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8057
8058                         len = skb_frag_size(frag);
8059                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8060                                                    len, DMA_TO_DEVICE);
8061
8062                         tnapi->tx_buffers[entry].skb = NULL;
8063                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8064                                            mapping);
8065                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8066                                 goto dma_error;
8067
8068                         if (!budget ||
8069                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8070                                             len, base_flags |
8071                                             ((i == last) ? TXD_FLAG_END : 0),
8072                                             tmp_mss, vlan)) {
8073                                 would_hit_hwbug = 1;
8074                                 break;
8075                         }
8076                 }
8077         }
8078
8079         if (would_hit_hwbug) {
8080                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8081
8082                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8083                         /* If it's a TSO packet, do GSO instead of
8084                          * allocating and copying to a large linear SKB
8085                          */
8086                         if (ip_tot_len) {
8087                                 iph->check = ip_csum;
8088                                 iph->tot_len = ip_tot_len;
8089                         }
8090                         tcph->check = tcp_csum;
8091                         return tg3_tso_bug(tp, tnapi, txq, skb);
8092                 }
8093
8094                 /* If the workaround fails due to memory/mapping
8095                  * failure, silently drop this packet.
8096                  */
8097                 entry = tnapi->tx_prod;
8098                 budget = tg3_tx_avail(tnapi);
8099                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8100                                                 base_flags, mss, vlan))
8101                         goto drop_nofree;
8102         }
8103
8104         skb_tx_timestamp(skb);
8105         netdev_tx_sent_queue(txq, skb->len);
8106
8107         /* Sync BD data before updating mailbox */
8108         wmb();
8109
8110         tnapi->tx_prod = entry;
8111         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8112                 netif_tx_stop_queue(txq);
8113
8114                 /* netif_tx_stop_queue() must be done before checking
8115                  * checking tx index in tg3_tx_avail() below, because in
8116                  * tg3_tx(), we update tx index before checking for
8117                  * netif_tx_queue_stopped().
8118                  */
8119                 smp_mb();
8120                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8121                         netif_tx_wake_queue(txq);
8122         }
8123
8124         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8125                 /* Packets are ready, update Tx producer idx on card. */
8126                 tw32_tx_mbox(tnapi->prodmbox, entry);
8127                 mmiowb();
8128         }
8129
8130         return NETDEV_TX_OK;
8131
8132 dma_error:
8133         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8134         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8135 drop:
8136         dev_kfree_skb_any(skb);
8137 drop_nofree:
8138         tp->tx_dropped++;
8139         return NETDEV_TX_OK;
8140 }
8141
8142 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8143 {
8144         if (enable) {
8145                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8146                                   MAC_MODE_PORT_MODE_MASK);
8147
8148                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8149
8150                 if (!tg3_flag(tp, 5705_PLUS))
8151                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8152
8153                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8154                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8155                 else
8156                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8157         } else {
8158                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8159
8160                 if (tg3_flag(tp, 5705_PLUS) ||
8161                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8162                     tg3_asic_rev(tp) == ASIC_REV_5700)
8163                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8164         }
8165
8166         tw32(MAC_MODE, tp->mac_mode);
8167         udelay(40);
8168 }
8169
/* Put the PHY into loopback at the requested speed.  When @extlpbk is
 * true, external loopback is configured via tg3_phy_set_extloopbk();
 * otherwise BMCR_LOOPBACK selects internal PHY loopback.  The MAC port
 * mode is then programmed to match the chosen speed.  Returns 0 on
 * success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value: always full duplex, speed as requested.
	 * FET PHYs cannot do 1000 Mbps, so fall back to 100 there.
	 */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master mode for external loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs need opposite link-polarity
		 * settings on the 5700.
		 */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8262
8263 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8264 {
8265         struct tg3 *tp = netdev_priv(dev);
8266
8267         if (features & NETIF_F_LOOPBACK) {
8268                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8269                         return;
8270
8271                 spin_lock_bh(&tp->lock);
8272                 tg3_mac_loopback(tp, true);
8273                 netif_carrier_on(tp->dev);
8274                 spin_unlock_bh(&tp->lock);
8275                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8276         } else {
8277                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8278                         return;
8279
8280                 spin_lock_bh(&tp->lock);
8281                 tg3_mac_loopback(tp, false);
8282                 /* Force link status check */
8283                 tg3_setup_phy(tp, true);
8284                 spin_unlock_bh(&tp->lock);
8285                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8286         }
8287 }
8288
8289 static netdev_features_t tg3_fix_features(struct net_device *dev,
8290         netdev_features_t features)
8291 {
8292         struct tg3 *tp = netdev_priv(dev);
8293
8294         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8295                 features &= ~NETIF_F_ALL_TSO;
8296
8297         return features;
8298 }
8299
8300 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8301 {
8302         netdev_features_t changed = dev->features ^ features;
8303
8304         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8305                 tg3_set_loopback(dev, features);
8306
8307         return 0;
8308 }
8309
8310 static void tg3_rx_prodring_free(struct tg3 *tp,
8311                                  struct tg3_rx_prodring_set *tpr)
8312 {
8313         int i;
8314
8315         if (tpr != &tp->napi[0].prodring) {
8316                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8317                      i = (i + 1) & tp->rx_std_ring_mask)
8318                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8319                                         tp->rx_pkt_map_sz);
8320
8321                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8322                         for (i = tpr->rx_jmb_cons_idx;
8323                              i != tpr->rx_jmb_prod_idx;
8324                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8325                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8326                                                 TG3_RX_JMB_MAP_SZ);
8327                         }
8328                 }
8329
8330                 return;
8331         }
8332
8333         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8334                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8335                                 tp->rx_pkt_map_sz);
8336
8337         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8338                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8339                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8340                                         TG3_RX_JMB_MAP_SZ);
8341         }
8342 }
8343
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector ring sets only need their shadow buffer arrays
	 * cleared; the hardware descriptor rings live on vector 0's set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class parts use the standard ring for jumbo-sized
	 * buffers when the MTU exceeds the standard Ethernet payload.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Getting no buffers at all is fatal; otherwise
			 * shrink the ring to what we managed to allocate.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the jumbo ring descriptors. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8452
8453 static void tg3_rx_prodring_fini(struct tg3 *tp,
8454                                  struct tg3_rx_prodring_set *tpr)
8455 {
8456         kfree(tpr->rx_std_buffers);
8457         tpr->rx_std_buffers = NULL;
8458         kfree(tpr->rx_jmb_buffers);
8459         tpr->rx_jmb_buffers = NULL;
8460         if (tpr->rx_std) {
8461                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8462                                   tpr->rx_std, tpr->rx_std_mapping);
8463                 tpr->rx_std = NULL;
8464         }
8465         if (tpr->rx_jmb) {
8466                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8467                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8468                 tpr->rx_jmb = NULL;
8469         }
8470 }
8471
8472 static int tg3_rx_prodring_init(struct tg3 *tp,
8473                                 struct tg3_rx_prodring_set *tpr)
8474 {
8475         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8476                                       GFP_KERNEL);
8477         if (!tpr->rx_std_buffers)
8478                 return -ENOMEM;
8479
8480         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8481                                          TG3_RX_STD_RING_BYTES(tp),
8482                                          &tpr->rx_std_mapping,
8483                                          GFP_KERNEL);
8484         if (!tpr->rx_std)
8485                 goto err_out;
8486
8487         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8488                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8489                                               GFP_KERNEL);
8490                 if (!tpr->rx_jmb_buffers)
8491                         goto err_out;
8492
8493                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8494                                                  TG3_RX_JMB_RING_BYTES(tp),
8495                                                  &tpr->rx_jmb_mapping,
8496                                                  GFP_KERNEL);
8497                 if (!tpr->rx_jmb)
8498                         goto err_out;
8499         }
8500
8501         return 0;
8502
8503 err_out:
8504         tg3_rx_prodring_fini(tp, tpr);
8505         return -ENOMEM;
8506 }
8507
8508 /* Free up pending packets in all rx/tx rings.
8509  *
8510  * The chip has been shut down and the driver detached from
8511  * the networking, so no interrupts or new tx packets will
8512  * end up in the driver.  tp->{tx,}lock is not held and we are not
8513  * in an interrupt context and thus may sleep.
8514  */
8515 static void tg3_free_rings(struct tg3 *tp)
8516 {
8517         int i, j;
8518
8519         for (j = 0; j < tp->irq_cnt; j++) {
8520                 struct tg3_napi *tnapi = &tp->napi[j];
8521
8522                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8523
8524                 if (!tnapi->tx_buffers)
8525                         continue;
8526
8527                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8528                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8529
8530                         if (!skb)
8531                                 continue;
8532
8533                         tg3_tx_skb_unmap(tnapi, i,
8534                                          skb_shinfo(skb)->nr_frags - 1);
8535
8536                         dev_kfree_skb_any(skb);
8537                 }
8538                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8539         }
8540 }
8541
8542 /* Initialize tx/rx rings for packet processing.
8543  *
8544  * The chip has been shut down and the driver detached from
8545  * the networking, so no interrupts or new tx packets will
8546  * end up in the driver.  tp->{tx,}lock are held and thus
8547  * we may not sleep.
8548  */
8549 static int tg3_init_rings(struct tg3 *tp)
8550 {
8551         int i;
8552
8553         /* Free up all the SKBs. */
8554         tg3_free_rings(tp);
8555
8556         for (i = 0; i < tp->irq_cnt; i++) {
8557                 struct tg3_napi *tnapi = &tp->napi[i];
8558
8559                 tnapi->last_tag = 0;
8560                 tnapi->last_irq_tag = 0;
8561                 tnapi->hw_status->status = 0;
8562                 tnapi->hw_status->status_tag = 0;
8563                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8564
8565                 tnapi->tx_prod = 0;
8566                 tnapi->tx_cons = 0;
8567                 if (tnapi->tx_ring)
8568                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8569
8570                 tnapi->rx_rcb_ptr = 0;
8571                 if (tnapi->rx_rcb)
8572                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8573
8574                 if (tnapi->prodring.rx_std &&
8575                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8576                         tg3_free_rings(tp);
8577                         return -ENOMEM;
8578                 }
8579         }
8580
8581         return 0;
8582 }
8583
8584 static void tg3_mem_tx_release(struct tg3 *tp)
8585 {
8586         int i;
8587
8588         for (i = 0; i < tp->irq_max; i++) {
8589                 struct tg3_napi *tnapi = &tp->napi[i];
8590
8591                 if (tnapi->tx_ring) {
8592                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8593                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8594                         tnapi->tx_ring = NULL;
8595                 }
8596
8597                 kfree(tnapi->tx_buffers);
8598                 tnapi->tx_buffers = NULL;
8599         }
8600 }
8601
8602 static int tg3_mem_tx_acquire(struct tg3 *tp)
8603 {
8604         int i;
8605         struct tg3_napi *tnapi = &tp->napi[0];
8606
8607         /* If multivector TSS is enabled, vector 0 does not handle
8608          * tx interrupts.  Don't allocate any resources for it.
8609          */
8610         if (tg3_flag(tp, ENABLE_TSS))
8611                 tnapi++;
8612
8613         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8614                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8615                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8616                 if (!tnapi->tx_buffers)
8617                         goto err_out;
8618
8619                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8620                                                     TG3_TX_RING_BYTES,
8621                                                     &tnapi->tx_desc_mapping,
8622                                                     GFP_KERNEL);
8623                 if (!tnapi->tx_ring)
8624                         goto err_out;
8625         }
8626
8627         return 0;
8628
8629 err_out:
8630         tg3_mem_tx_release(tp);
8631         return -ENOMEM;
8632 }
8633
8634 static void tg3_mem_rx_release(struct tg3 *tp)
8635 {
8636         int i;
8637
8638         for (i = 0; i < tp->irq_max; i++) {
8639                 struct tg3_napi *tnapi = &tp->napi[i];
8640
8641                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8642
8643                 if (!tnapi->rx_rcb)
8644                         continue;
8645
8646                 dma_free_coherent(&tp->pdev->dev,
8647                                   TG3_RX_RCB_RING_BYTES(tp),
8648                                   tnapi->rx_rcb,
8649                                   tnapi->rx_rcb_mapping);
8650                 tnapi->rx_rcb = NULL;
8651         }
8652 }
8653
8654 static int tg3_mem_rx_acquire(struct tg3 *tp)
8655 {
8656         unsigned int i, limit;
8657
8658         limit = tp->rxq_cnt;
8659
8660         /* If RSS is enabled, we need a (dummy) producer ring
8661          * set on vector zero.  This is the true hw prodring.
8662          */
8663         if (tg3_flag(tp, ENABLE_RSS))
8664                 limit++;
8665
8666         for (i = 0; i < limit; i++) {
8667                 struct tg3_napi *tnapi = &tp->napi[i];
8668
8669                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8670                         goto err_out;
8671
8672                 /* If multivector RSS is enabled, vector 0
8673                  * does not handle rx or tx interrupts.
8674                  * Don't allocate any resources for it.
8675                  */
8676                 if (!i && tg3_flag(tp, ENABLE_RSS))
8677                         continue;
8678
8679                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8680                                                     TG3_RX_RCB_RING_BYTES(tp),
8681                                                     &tnapi->rx_rcb_mapping,
8682                                                     GFP_KERNEL);
8683                 if (!tnapi->rx_rcb)
8684                         goto err_out;
8685         }
8686
8687         return 0;
8688
8689 err_out:
8690         tg3_mem_rx_release(tp);
8691         return -ENOMEM;
8692 }
8693
8694 /*
8695  * Must not be invoked with interrupt sources disabled and
8696  * the hardware shutdown down.
8697  */
8698 static void tg3_free_consistent(struct tg3 *tp)
8699 {
8700         int i;
8701
8702         for (i = 0; i < tp->irq_cnt; i++) {
8703                 struct tg3_napi *tnapi = &tp->napi[i];
8704
8705                 if (tnapi->hw_status) {
8706                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8707                                           tnapi->hw_status,
8708                                           tnapi->status_mapping);
8709                         tnapi->hw_status = NULL;
8710                 }
8711         }
8712
8713         tg3_mem_rx_release(tp);
8714         tg3_mem_tx_release(tp);
8715
8716         /* tp->hw_stats can be referenced safely:
8717          *     1. under rtnl_lock
8718          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8719          */
8720         if (tp->hw_stats) {
8721                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8722                                   tp->hw_stats, tp->stats_mapping);
8723                 tp->hw_stats = NULL;
8724         }
8725 }
8726
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware statistics block, a status block per
 * interrupt vector, and all tx/rx ring memory.  Returns 0 or
 * -ENOMEM, freeing everything already allocated on failure.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* NOTE(review): vector 0 (and any vector > 4)
			 * leaves prodptr NULL here — presumably those
			 * vectors carry no rx return ring under RSS;
			 * confirm against tg3_mem_rx_acquire().
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8792
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 on success (or for blocks that cannot be individually
 * disabled on 5705+ parts), -ENODEV if the PCI channel goes offline
 * or the enable bit fails to clear within the polling window (the
 * latter only reported when @silent is false).
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the bit to clear,
	 * bailing out early if the device has dropped off the bus.
	 */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8848
/* Stop all of the chip's receive, transmit, and DMA engines in a safe
 * order, then clear every vector's host status block.  Individual
 * block-stop failures are OR-ed into the return value rather than
 * aborting the sequence, so as much of the chip as possible is
 * quiesced even on error.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		/* Device is unreachable; skip all register accesses and
		 * only record the intended mode-bit state.
		 */
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then the send-path and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for the bit to clear. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	/* Clear every vector's status block so stale status is not
	 * consumed after the hardware has been stopped.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8920
/* Save the PCI command register before chip reset; the GRC core-clock
 * reset can clear the memory-enable bit, so tg3_restore_pci_state()
 * writes this saved value back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8926
/* Restore PCI config space after chip reset: re-enable indirect
 * register access, restore the saved PCI command word, cache-line
 * size and latency timer (legacy PCI only), clear PCI-X relaxed
 * ordering, and re-enable MSI on 5780-class chips whose reset clears
 * the MSI enable bit.  The write order matters: indirect access must
 * come back first so later accesses work.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8987
8988 static void tg3_override_clk(struct tg3 *tp)
8989 {
8990         u32 val;
8991
8992         switch (tg3_asic_rev(tp)) {
8993         case ASIC_REV_5717:
8994                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8995                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8996                      TG3_CPMU_MAC_ORIDE_ENABLE);
8997                 break;
8998
8999         case ASIC_REV_5719:
9000         case ASIC_REV_5720:
9001                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9002                 break;
9003
9004         default:
9005                 return;
9006         }
9007 }
9008
9009 static void tg3_restore_clk(struct tg3 *tp)
9010 {
9011         u32 val;
9012
9013         switch (tg3_asic_rev(tp)) {
9014         case ASIC_REV_5717:
9015                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9016                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9017                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9018                 break;
9019
9020         case ASIC_REV_5719:
9021         case ASIC_REV_5720:
9022                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9023                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9024                 break;
9025
9026         default:
9027                 return;
9028         }
9029 }
9030
/* Perform a full GRC core-clock chip reset and restore the device to
 * a usable post-reset state: PCI config space, memory arbiter, MAC
 * mode, clocks, and re-probed ASF state.  tp->lock is held on entry
 * and exit, but is dropped and re-acquired around the irq
 * synchronization step (hence the sparse annotations).
 * Returns 0 on success or a negative errno.
 */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Drop the lock while waiting out in-flight irq handlers;
	 * synchronize_irq() may sleep.
	 */
	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving existing mode bits
	 * on 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for bootcode to signal completion before touching the MAC. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode to match the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9308
9309 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9310 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9311 static void __tg3_set_rx_mode(struct net_device *);
9312
/* Halt the chip: stop firmware activity, quiesce the hardware, reset
 * the chip, and preserve the accumulated statistics across the reset.
 * @kind is passed to the reset-signature helpers so firmware knows why
 * the reset occurred.  Returns the tg3_chip_reset() result.
 * tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	/* Tell the bootcode/ASF firmware a reset is coming and why. */
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Reset clobbers the MAC address registers; reprogram them. */
	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
9341
9342 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9343 {
9344         struct tg3 *tp = netdev_priv(dev);
9345         struct sockaddr *addr = p;
9346         int err = 0;
9347         bool skip_mac_1 = false;
9348
9349         if (!is_valid_ether_addr(addr->sa_data))
9350                 return -EADDRNOTAVAIL;
9351
9352         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9353
9354         if (!netif_running(dev))
9355                 return 0;
9356
9357         if (tg3_flag(tp, ENABLE_ASF)) {
9358                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9359
9360                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9361                 addr0_low = tr32(MAC_ADDR_0_LOW);
9362                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9363                 addr1_low = tr32(MAC_ADDR_1_LOW);
9364
9365                 /* Skip MAC addr 1 if ASF is using it. */
9366                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9367                     !(addr1_high == 0 && addr1_low == 0))
9368                         skip_mac_1 = true;
9369         }
9370         spin_lock_bh(&tp->lock);
9371         __tg3_set_mac_addr(tp, skip_mac_1);
9372         __tg3_set_rx_mode(dev);
9373         spin_unlock_bh(&tp->lock);
9374
9375         return err;
9376 }
9377
9378 /* tp->lock is held. */
9379 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9380                            dma_addr_t mapping, u32 maxlen_flags,
9381                            u32 nic_addr)
9382 {
9383         tg3_write_mem(tp,
9384                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9385                       ((u64) mapping >> 32));
9386         tg3_write_mem(tp,
9387                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9388                       ((u64) mapping & 0xffffffff));
9389         tg3_write_mem(tp,
9390                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9391                        maxlen_flags);
9392
9393         if (!tg3_flag(tp, 5705_PLUS))
9394                 tg3_write_mem(tp,
9395                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9396                               nic_addr);
9397 }
9398
9399
9400 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9401 {
9402         int i = 0;
9403
9404         if (!tg3_flag(tp, ENABLE_TSS)) {
9405                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9406                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9407                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9408         } else {
9409                 tw32(HOSTCC_TXCOL_TICKS, 0);
9410                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9411                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9412
9413                 for (; i < tp->txq_cnt; i++) {
9414                         u32 reg;
9415
9416                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9417                         tw32(reg, ec->tx_coalesce_usecs);
9418                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_max_coalesced_frames);
9420                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9421                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9422                 }
9423         }
9424
9425         for (; i < tp->irq_max - 1; i++) {
9426                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9427                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9428                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9429         }
9430 }
9431
9432 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9433 {
9434         int i = 0;
9435         u32 limit = tp->rxq_cnt;
9436
9437         if (!tg3_flag(tp, ENABLE_RSS)) {
9438                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9439                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9440                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9441                 limit--;
9442         } else {
9443                 tw32(HOSTCC_RXCOL_TICKS, 0);
9444                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9445                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9446         }
9447
9448         for (; i < limit; i++) {
9449                 u32 reg;
9450
9451                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9452                 tw32(reg, ec->rx_coalesce_usecs);
9453                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_max_coalesced_frames);
9455                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9456                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9457         }
9458
9459         for (; i < tp->irq_max - 1; i++) {
9460                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9461                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9462                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9463         }
9464 }
9465
9466 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9467 {
9468         tg3_coal_tx_init(tp, ec);
9469         tg3_coal_rx_init(tp, ec);
9470
9471         if (!tg3_flag(tp, 5705_PLUS)) {
9472                 u32 val = ec->stats_block_coalesce_usecs;
9473
9474                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9475                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9476
9477                 if (!tp->link_up)
9478                         val = 0;
9479
9480                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9481         }
9482 }
9483
9484 /* tp->lock is held. */
9485 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9486 {
9487         u32 txrcb, limit;
9488
9489         /* Disable all transmit rings but the first. */
9490         if (!tg3_flag(tp, 5705_PLUS))
9491                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9492         else if (tg3_flag(tp, 5717_PLUS))
9493                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9494         else if (tg3_flag(tp, 57765_CLASS) ||
9495                  tg3_asic_rev(tp) == ASIC_REV_5762)
9496                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9497         else
9498                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499
9500         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9502                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9503                               BDINFO_FLAGS_DISABLED);
9504 }
9505
9506 /* tp->lock is held. */
9507 static void tg3_tx_rcbs_init(struct tg3 *tp)
9508 {
9509         int i = 0;
9510         u32 txrcb = NIC_SRAM_SEND_RCB;
9511
9512         if (tg3_flag(tp, ENABLE_TSS))
9513                 i++;
9514
9515         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9516                 struct tg3_napi *tnapi = &tp->napi[i];
9517
9518                 if (!tnapi->tx_ring)
9519                         continue;
9520
9521                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9522                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9523                                NIC_SRAM_TX_BUFFER_DESC);
9524         }
9525 }
9526
9527 /* tp->lock is held. */
9528 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9529 {
9530         u32 rxrcb, limit;
9531
9532         /* Disable all receive return rings but the first. */
9533         if (tg3_flag(tp, 5717_PLUS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9535         else if (!tg3_flag(tp, 5705_PLUS))
9536                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9537         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9538                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9539                  tg3_flag(tp, 57765_CLASS))
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9541         else
9542                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543
9544         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9546                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9547                               BDINFO_FLAGS_DISABLED);
9548 }
9549
9550 /* tp->lock is held. */
9551 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9552 {
9553         int i = 0;
9554         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9555
9556         if (tg3_flag(tp, ENABLE_RSS))
9557                 i++;
9558
9559         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9560                 struct tg3_napi *tnapi = &tp->napi[i];
9561
9562                 if (!tnapi->rx_rcb)
9563                         continue;
9564
9565                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9566                                (tp->rx_ret_ring_mask + 1) <<
9567                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9568         }
9569 }
9570
/* Reset all ring state: disable the extra TX and RX-return RCBs,
 * quiesce and zero the interrupt/producer/consumer mailboxes, clear
 * and re-point every vector's status block, then re-initialize the
 * active TX and RX-return RCBs.  tp->lock is held.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program the per-vector status block addresses for the
	 * remaining vectors; each ring's register pair is 8 bytes
	 * after the previous one.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9641
/* Program the RX buffer-descriptor replenish thresholds.
 *
 * Selects the on-chip standard-ring BD cache size appropriate for this
 * ASIC, then sets the standard (and, when jumbo frames are supported,
 * jumbo) replenish thresholds to the smaller of half that cache and
 * 1/8 of the configured host ring depth (minimum 1).  57765-plus parts
 * additionally get a replenish low-water mark equal to the full BD
 * cache size.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Pick the per-ASIC size of the NIC's standard-ring BD cache. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* 5780-class parts don't use the jumbo ring registers below. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9680
/* Bitwise CRC-32 (reflected polynomial 0xedb88320, as used by
 * Ethernet) over buf[0..len-1]; the result indexes the multicast
 * hash filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32)0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		/* Branchless equivalent of "shift right, XOR the
		 * polynomial in when the bit shifted out was set".
		 */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}

	return ~crc;
}
9704
9705 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9706 {
9707         /* accept or reject all multicast frames */
9708         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9709         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9710         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9711         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9712 }
9713
/* Program the MAC receive filters (promiscuity, multicast hash,
 * unicast address slots) from dev->flags and the device's address
 * lists, then latch RX_MODE if it changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* CRC-32 each address; the inverted low 7 bits of the
		 * CRC select one of the 128 hash filter bits: bits 6:5
		 * pick the register, bits 4:0 the bit within it.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* If there are more unicast addresses than exact-match slots,
	 * fall back to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	/* Only touch the hardware if the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9781
9782 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9783 {
9784         int i;
9785
9786         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9787                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9788 }
9789
9790 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9791 {
9792         int i;
9793
9794         if (!tg3_flag(tp, SUPPORT_MSIX))
9795                 return;
9796
9797         if (tp->rxq_cnt == 1) {
9798                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9799                 return;
9800         }
9801
9802         /* Validate table against current IRQ count */
9803         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9804                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9805                         break;
9806         }
9807
9808         if (i != TG3_RSS_INDIR_TBL_SIZE)
9809                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9810 }
9811
9812 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9813 {
9814         int i = 0;
9815         u32 reg = MAC_RSS_INDIR_TBL_0;
9816
9817         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9818                 u32 val = tp->rss_ind_tbl[i];
9819                 i++;
9820                 for (; i % 8; i++) {
9821                         val <<= 4;
9822                         val |= tp->rss_ind_tbl[i];
9823                 }
9824                 tw32(reg, val);
9825                 reg += 4;
9826         }
9827 }
9828
9829 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9830 {
9831         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9832                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9833         else
9834                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9835 }
9836
9837 /* tp->lock is held. */
9838 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9839 {
9840         u32 val, rdmac_mode;
9841         int i, err, limit;
9842         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9843
9844         tg3_disable_ints(tp);
9845
9846         tg3_stop_fw(tp);
9847
9848         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9849
9850         if (tg3_flag(tp, INIT_COMPLETE))
9851                 tg3_abort_hw(tp, 1);
9852
9853         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9854             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9855                 tg3_phy_pull_config(tp);
9856                 tg3_eee_pull_config(tp, NULL);
9857                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9858         }
9859
9860         /* Enable MAC control of LPI */
9861         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9862                 tg3_setup_eee(tp);
9863
9864         if (reset_phy)
9865                 tg3_phy_reset(tp);
9866
9867         err = tg3_chip_reset(tp);
9868         if (err)
9869                 return err;
9870
9871         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9872
9873         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9874                 val = tr32(TG3_CPMU_CTRL);
9875                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9876                 tw32(TG3_CPMU_CTRL, val);
9877
9878                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9879                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9880                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9881                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9882
9883                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9884                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9885                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9886                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9887
9888                 val = tr32(TG3_CPMU_HST_ACC);
9889                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9890                 val |= CPMU_HST_ACC_MACCLK_6_25;
9891                 tw32(TG3_CPMU_HST_ACC, val);
9892         }
9893
9894         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9895                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9896                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9897                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9898                 tw32(PCIE_PWR_MGMT_THRESH, val);
9899
9900                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9901                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9902
9903                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9904
9905                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9906                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9907         }
9908
9909         if (tg3_flag(tp, L1PLLPD_EN)) {
9910                 u32 grc_mode = tr32(GRC_MODE);
9911
9912                 /* Access the lower 1K of PL PCIE block registers. */
9913                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9914                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9915
9916                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9917                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9918                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9919
9920                 tw32(GRC_MODE, grc_mode);
9921         }
9922
9923         if (tg3_flag(tp, 57765_CLASS)) {
9924                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9925                         u32 grc_mode = tr32(GRC_MODE);
9926
9927                         /* Access the lower 1K of PL PCIE block registers. */
9928                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9929                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9930
9931                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9932                                    TG3_PCIE_PL_LO_PHYCTL5);
9933                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9934                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9935
9936                         tw32(GRC_MODE, grc_mode);
9937                 }
9938
9939                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9940                         u32 grc_mode;
9941
9942                         /* Fix transmit hangs */
9943                         val = tr32(TG3_CPMU_PADRNG_CTL);
9944                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9945                         tw32(TG3_CPMU_PADRNG_CTL, val);
9946
9947                         grc_mode = tr32(GRC_MODE);
9948
9949                         /* Access the lower 1K of DL PCIE block registers. */
9950                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9951                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9952
9953                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9954                                    TG3_PCIE_DL_LO_FTSMAX);
9955                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9956                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9957                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9958
9959                         tw32(GRC_MODE, grc_mode);
9960                 }
9961
9962                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9963                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9964                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9965                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9966         }
9967
9968         /* This works around an issue with Athlon chipsets on
9969          * B3 tigon3 silicon.  This bit has no effect on any
9970          * other revision.  But do not set this on PCI Express
9971          * chips and don't even touch the clocks if the CPMU is present.
9972          */
9973         if (!tg3_flag(tp, CPMU_PRESENT)) {
9974                 if (!tg3_flag(tp, PCI_EXPRESS))
9975                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9976                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9977         }
9978
9979         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9980             tg3_flag(tp, PCIX_MODE)) {
9981                 val = tr32(TG3PCI_PCISTATE);
9982                 val |= PCISTATE_RETRY_SAME_DMA;
9983                 tw32(TG3PCI_PCISTATE, val);
9984         }
9985
9986         if (tg3_flag(tp, ENABLE_APE)) {
9987                 /* Allow reads and writes to the
9988                  * APE register and memory space.
9989                  */
9990                 val = tr32(TG3PCI_PCISTATE);
9991                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9992                        PCISTATE_ALLOW_APE_SHMEM_WR |
9993                        PCISTATE_ALLOW_APE_PSPACE_WR;
9994                 tw32(TG3PCI_PCISTATE, val);
9995         }
9996
9997         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9998                 /* Enable some hw fixes.  */
9999                 val = tr32(TG3PCI_MSI_DATA);
10000                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10001                 tw32(TG3PCI_MSI_DATA, val);
10002         }
10003
10004         /* Descriptor ring init may make accesses to the
10005          * NIC SRAM area to setup the TX descriptors, so we
10006          * can only do this after the hardware has been
10007          * successfully reset.
10008          */
10009         err = tg3_init_rings(tp);
10010         if (err)
10011                 return err;
10012
10013         if (tg3_flag(tp, 57765_PLUS)) {
10014                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10015                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10016                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10017                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10018                 if (!tg3_flag(tp, 57765_CLASS) &&
10019                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10020                     tg3_asic_rev(tp) != ASIC_REV_5762)
10021                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10022                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10023         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10024                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10025                 /* This value is determined during the probe time DMA
10026                  * engine test, tg3_test_dma.
10027                  */
10028                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10029         }
10030
10031         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10032                           GRC_MODE_4X_NIC_SEND_RINGS |
10033                           GRC_MODE_NO_TX_PHDR_CSUM |
10034                           GRC_MODE_NO_RX_PHDR_CSUM);
10035         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10036
10037         /* Pseudo-header checksum is done by hardware logic and not
10038          * the offload processers, so make the chip do the pseudo-
10039          * header checksums on receive.  For transmit it is more
10040          * convenient to do the pseudo-header checksum in software
10041          * as Linux does that on transmit for us in all cases.
10042          */
10043         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10044
10045         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10046         if (tp->rxptpctl)
10047                 tw32(TG3_RX_PTP_CTL,
10048                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10049
10050         if (tg3_flag(tp, PTP_CAPABLE))
10051                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10052
10053         tw32(GRC_MODE, tp->grc_mode | val);
10054
10055         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10056          * south bridge limitation. As a workaround, Driver is setting MRRS
10057          * to 2048 instead of default 4096.
10058          */
10059         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10060             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10061                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10062                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10063         }
10064
10065         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10066         val = tr32(GRC_MISC_CFG);
10067         val &= ~0xff;
10068         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10069         tw32(GRC_MISC_CFG, val);
10070
10071         /* Initialize MBUF/DESC pool. */
10072         if (tg3_flag(tp, 5750_PLUS)) {
10073                 /* Do nothing.  */
10074         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10075                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10076                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10077                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10078                 else
10079                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10080                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10081                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10082         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10083                 int fw_len;
10084
10085                 fw_len = tp->fw_len;
10086                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10087                 tw32(BUFMGR_MB_POOL_ADDR,
10088                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10089                 tw32(BUFMGR_MB_POOL_SIZE,
10090                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10091         }
10092
10093         if (tp->dev->mtu <= ETH_DATA_LEN) {
10094                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10095                      tp->bufmgr_config.mbuf_read_dma_low_water);
10096                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10097                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10098                 tw32(BUFMGR_MB_HIGH_WATER,
10099                      tp->bufmgr_config.mbuf_high_water);
10100         } else {
10101                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10102                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10103                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10104                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10105                 tw32(BUFMGR_MB_HIGH_WATER,
10106                      tp->bufmgr_config.mbuf_high_water_jumbo);
10107         }
10108         tw32(BUFMGR_DMA_LOW_WATER,
10109              tp->bufmgr_config.dma_low_water);
10110         tw32(BUFMGR_DMA_HIGH_WATER,
10111              tp->bufmgr_config.dma_high_water);
10112
10113         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10114         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10115                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10116         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10117             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10118             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10119             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10120                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10121         tw32(BUFMGR_MODE, val);
10122         for (i = 0; i < 2000; i++) {
10123                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10124                         break;
10125                 udelay(10);
10126         }
10127         if (i >= 2000) {
10128                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10129                 return -ENODEV;
10130         }
10131
10132         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10133                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10134
10135         tg3_setup_rxbd_thresholds(tp);
10136
10137         /* Initialize TG3_BDINFO's at:
10138          *  RCVDBDI_STD_BD:     standard eth size rx ring
10139          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10140          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10141          *
10142          * like so:
10143          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10144          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10145          *                              ring attribute flags
10146          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10147          *
10148          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10149          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10150          *
10151          * The size of each ring is fixed in the firmware, but the location is
10152          * configurable.
10153          */
10154         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10155              ((u64) tpr->rx_std_mapping >> 32));
10156         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10157              ((u64) tpr->rx_std_mapping & 0xffffffff));
10158         if (!tg3_flag(tp, 5717_PLUS))
10159                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10160                      NIC_SRAM_RX_BUFFER_DESC);
10161
10162         /* Disable the mini ring */
10163         if (!tg3_flag(tp, 5705_PLUS))
10164                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10165                      BDINFO_FLAGS_DISABLED);
10166
10167         /* Program the jumbo buffer descriptor ring control
10168          * blocks on those devices that have them.
10169          */
10170         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10171             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10172
10173                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10174                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10175                              ((u64) tpr->rx_jmb_mapping >> 32));
10176                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10177                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10178                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10179                               BDINFO_FLAGS_MAXLEN_SHIFT;
10180                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10181                              val | BDINFO_FLAGS_USE_EXT_RECV);
10182                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10183                             tg3_flag(tp, 57765_CLASS) ||
10184                             tg3_asic_rev(tp) == ASIC_REV_5762)
10185                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10186                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10187                 } else {
10188                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189                              BDINFO_FLAGS_DISABLED);
10190                 }
10191
10192                 if (tg3_flag(tp, 57765_PLUS)) {
10193                         val = TG3_RX_STD_RING_SIZE(tp);
10194                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10195                         val |= (TG3_RX_STD_DMA_SZ << 2);
10196                 } else
10197                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10198         } else
10199                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10200
10201         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10202
10203         tpr->rx_std_prod_idx = tp->rx_pending;
10204         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10205
10206         tpr->rx_jmb_prod_idx =
10207                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10208         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10209
10210         tg3_rings_reset(tp);
10211
10212         /* Initialize MAC address and backoff seed. */
10213         __tg3_set_mac_addr(tp, false);
10214
10215         /* MTU + ethernet header + FCS + optional VLAN tag */
10216         tw32(MAC_RX_MTU_SIZE,
10217              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10218
10219         /* The slot time is changed by tg3_setup_phy if we
10220          * run at gigabit with half duplex.
10221          */
10222         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10223               (6 << TX_LENGTHS_IPG_SHIFT) |
10224               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10225
10226         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10227             tg3_asic_rev(tp) == ASIC_REV_5762)
10228                 val |= tr32(MAC_TX_LENGTHS) &
10229                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10230                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10231
10232         tw32(MAC_TX_LENGTHS, val);
10233
10234         /* Receive rules. */
10235         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10236         tw32(RCVLPC_CONFIG, 0x0181);
10237
10238         /* Calculate RDMAC_MODE setting early, we need it to determine
10239          * the RCVLPC_STATE_ENABLE mask.
10240          */
10241         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10242                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10243                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10244                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10245                       RDMAC_MODE_LNGREAD_ENAB);
10246
10247         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10248                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10249
10250         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10251             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10252             tg3_asic_rev(tp) == ASIC_REV_57780)
10253                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10254                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10255                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10256
10257         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10258             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10259                 if (tg3_flag(tp, TSO_CAPABLE) &&
10260                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10261                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10262                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10263                            !tg3_flag(tp, IS_5788)) {
10264                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10265                 }
10266         }
10267
10268         if (tg3_flag(tp, PCI_EXPRESS))
10269                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10270
10271         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10272                 tp->dma_limit = 0;
10273                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10274                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10275                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10276                 }
10277         }
10278
10279         if (tg3_flag(tp, HW_TSO_1) ||
10280             tg3_flag(tp, HW_TSO_2) ||
10281             tg3_flag(tp, HW_TSO_3))
10282                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10283
10284         if (tg3_flag(tp, 57765_PLUS) ||
10285             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10286             tg3_asic_rev(tp) == ASIC_REV_57780)
10287                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10288
10289         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10290             tg3_asic_rev(tp) == ASIC_REV_5762)
10291                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10292
10293         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10294             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10295             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10296             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10297             tg3_flag(tp, 57765_PLUS)) {
10298                 u32 tgtreg;
10299
10300                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10301                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10302                 else
10303                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10304
10305                 val = tr32(tgtreg);
10306                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10307                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10308                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10309                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10310                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10311                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10312                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10313                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10314                 }
10315                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10316         }
10317
10318         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10319             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10320             tg3_asic_rev(tp) == ASIC_REV_5762) {
10321                 u32 tgtreg;
10322
10323                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10324                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10325                 else
10326                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10327
10328                 val = tr32(tgtreg);
10329                 tw32(tgtreg, val |
10330                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10331                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10332         }
10333
10334         /* Receive/send statistics. */
10335         if (tg3_flag(tp, 5750_PLUS)) {
10336                 val = tr32(RCVLPC_STATS_ENABLE);
10337                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10338                 tw32(RCVLPC_STATS_ENABLE, val);
10339         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10340                    tg3_flag(tp, TSO_CAPABLE)) {
10341                 val = tr32(RCVLPC_STATS_ENABLE);
10342                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10343                 tw32(RCVLPC_STATS_ENABLE, val);
10344         } else {
10345                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10346         }
10347         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10348         tw32(SNDDATAI_STATSENAB, 0xffffff);
10349         tw32(SNDDATAI_STATSCTRL,
10350              (SNDDATAI_SCTRL_ENABLE |
10351               SNDDATAI_SCTRL_FASTUPD));
10352
10353         /* Setup host coalescing engine. */
10354         tw32(HOSTCC_MODE, 0);
10355         for (i = 0; i < 2000; i++) {
10356                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10357                         break;
10358                 udelay(10);
10359         }
10360
10361         __tg3_set_coalesce(tp, &tp->coal);
10362
10363         if (!tg3_flag(tp, 5705_PLUS)) {
10364                 /* Status/statistics block address.  See tg3_timer,
10365                  * the tg3_periodic_fetch_stats call there, and
10366                  * tg3_get_stats to see how this works for 5705/5750 chips.
10367                  */
10368                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10369                      ((u64) tp->stats_mapping >> 32));
10370                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10371                      ((u64) tp->stats_mapping & 0xffffffff));
10372                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10373
10374                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10375
10376                 /* Clear statistics and status block memory areas */
10377                 for (i = NIC_SRAM_STATS_BLK;
10378                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10379                      i += sizeof(u32)) {
10380                         tg3_write_mem(tp, i, 0);
10381                         udelay(40);
10382                 }
10383         }
10384
10385         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10386
10387         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10388         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10389         if (!tg3_flag(tp, 5705_PLUS))
10390                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10391
10392         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10393                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10394                 /* reset to prevent losing 1st rx packet intermittently */
10395                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10396                 udelay(10);
10397         }
10398
10399         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10400                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10401                         MAC_MODE_FHDE_ENABLE;
10402         if (tg3_flag(tp, ENABLE_APE))
10403                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10404         if (!tg3_flag(tp, 5705_PLUS) &&
10405             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10406             tg3_asic_rev(tp) != ASIC_REV_5700)
10407                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10408         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10409         udelay(40);
10410
10411         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10412          * If TG3_FLAG_IS_NIC is zero, we should read the
10413          * register to preserve the GPIO settings for LOMs. The GPIOs,
10414          * whether used as inputs or outputs, are set by boot code after
10415          * reset.
10416          */
10417         if (!tg3_flag(tp, IS_NIC)) {
10418                 u32 gpio_mask;
10419
10420                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10421                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10422                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10423
10424                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10425                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10426                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10427
10428                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10429                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10430
10431                 tp->grc_local_ctrl &= ~gpio_mask;
10432                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10433
10434                 /* GPIO1 must be driven high for eeprom write protect */
10435                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10436                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10437                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10438         }
10439         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10440         udelay(100);
10441
10442         if (tg3_flag(tp, USING_MSIX)) {
10443                 val = tr32(MSGINT_MODE);
10444                 val |= MSGINT_MODE_ENABLE;
10445                 if (tp->irq_cnt > 1)
10446                         val |= MSGINT_MODE_MULTIVEC_EN;
10447                 if (!tg3_flag(tp, 1SHOT_MSI))
10448                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10449                 tw32(MSGINT_MODE, val);
10450         }
10451
10452         if (!tg3_flag(tp, 5705_PLUS)) {
10453                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10454                 udelay(40);
10455         }
10456
10457         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10458                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10459                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10460                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10461                WDMAC_MODE_LNGREAD_ENAB);
10462
10463         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10464             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10465                 if (tg3_flag(tp, TSO_CAPABLE) &&
10466                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10467                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10468                         /* nothing */
10469                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10470                            !tg3_flag(tp, IS_5788)) {
10471                         val |= WDMAC_MODE_RX_ACCEL;
10472                 }
10473         }
10474
10475         /* Enable host coalescing bug fix */
10476         if (tg3_flag(tp, 5755_PLUS))
10477                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10478
10479         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10480                 val |= WDMAC_MODE_BURST_ALL_DATA;
10481
10482         tw32_f(WDMAC_MODE, val);
10483         udelay(40);
10484
10485         if (tg3_flag(tp, PCIX_MODE)) {
10486                 u16 pcix_cmd;
10487
10488                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10489                                      &pcix_cmd);
10490                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10491                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10492                         pcix_cmd |= PCI_X_CMD_READ_2K;
10493                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10494                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10495                         pcix_cmd |= PCI_X_CMD_READ_2K;
10496                 }
10497                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10498                                       pcix_cmd);
10499         }
10500
10501         tw32_f(RDMAC_MODE, rdmac_mode);
10502         udelay(40);
10503
10504         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10505             tg3_asic_rev(tp) == ASIC_REV_5720) {
10506                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10507                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10508                                 break;
10509                 }
10510                 if (i < TG3_NUM_RDMA_CHANNELS) {
10511                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10512                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10513                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10514                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10515                 }
10516         }
10517
10518         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10519         if (!tg3_flag(tp, 5705_PLUS))
10520                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10521
10522         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10523                 tw32(SNDDATAC_MODE,
10524                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10525         else
10526                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10527
10528         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10529         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10530         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10531         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10532                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10533         tw32(RCVDBDI_MODE, val);
10534         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10535         if (tg3_flag(tp, HW_TSO_1) ||
10536             tg3_flag(tp, HW_TSO_2) ||
10537             tg3_flag(tp, HW_TSO_3))
10538                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10539         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10540         if (tg3_flag(tp, ENABLE_TSS))
10541                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10542         tw32(SNDBDI_MODE, val);
10543         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10544
10545         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10546                 err = tg3_load_5701_a0_firmware_fix(tp);
10547                 if (err)
10548                         return err;
10549         }
10550
10551         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10552                 /* Ignore any errors for the firmware download. If download
10553                  * fails, the device will operate with EEE disabled
10554                  */
10555                 tg3_load_57766_firmware(tp);
10556         }
10557
10558         if (tg3_flag(tp, TSO_CAPABLE)) {
10559                 err = tg3_load_tso_firmware(tp);
10560                 if (err)
10561                         return err;
10562         }
10563
10564         tp->tx_mode = TX_MODE_ENABLE;
10565
10566         if (tg3_flag(tp, 5755_PLUS) ||
10567             tg3_asic_rev(tp) == ASIC_REV_5906)
10568                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10569
10570         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10571             tg3_asic_rev(tp) == ASIC_REV_5762) {
10572                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10573                 tp->tx_mode &= ~val;
10574                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10575         }
10576
10577         tw32_f(MAC_TX_MODE, tp->tx_mode);
10578         udelay(100);
10579
10580         if (tg3_flag(tp, ENABLE_RSS)) {
10581                 u32 rss_key[10];
10582
10583                 tg3_rss_write_indir_tbl(tp);
10584
10585                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10586
10587                 for (i = 0; i < 10 ; i++)
10588                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10589         }
10590
10591         tp->rx_mode = RX_MODE_ENABLE;
10592         if (tg3_flag(tp, 5755_PLUS))
10593                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10594
10595         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10596                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10597
10598         if (tg3_flag(tp, ENABLE_RSS))
10599                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10600                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10601                                RX_MODE_RSS_IPV6_HASH_EN |
10602                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10603                                RX_MODE_RSS_IPV4_HASH_EN |
10604                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10605
10606         tw32_f(MAC_RX_MODE, tp->rx_mode);
10607         udelay(10);
10608
10609         tw32(MAC_LED_CTRL, tp->led_ctrl);
10610
10611         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10612         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10613                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10614                 udelay(10);
10615         }
10616         tw32_f(MAC_RX_MODE, tp->rx_mode);
10617         udelay(10);
10618
10619         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10620                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10621                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10622                         /* Set drive transmission level to 1.2V  */
10623                         /* only if the signal pre-emphasis bit is not set  */
10624                         val = tr32(MAC_SERDES_CFG);
10625                         val &= 0xfffff000;
10626                         val |= 0x880;
10627                         tw32(MAC_SERDES_CFG, val);
10628                 }
10629                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10630                         tw32(MAC_SERDES_CFG, 0x616000);
10631         }
10632
10633         /* Prevent chip from dropping frames when flow control
10634          * is enabled.
10635          */
10636         if (tg3_flag(tp, 57765_CLASS))
10637                 val = 1;
10638         else
10639                 val = 2;
10640         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10641
10642         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10643             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10644                 /* Use hardware link auto-negotiation */
10645                 tg3_flag_set(tp, HW_AUTONEG);
10646         }
10647
10648         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10649             tg3_asic_rev(tp) == ASIC_REV_5714) {
10650                 u32 tmp;
10651
10652                 tmp = tr32(SERDES_RX_CTRL);
10653                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10654                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10655                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10656                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10657         }
10658
10659         if (!tg3_flag(tp, USE_PHYLIB)) {
10660                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10661                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10662
10663                 err = tg3_setup_phy(tp, false);
10664                 if (err)
10665                         return err;
10666
10667                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10668                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10669                         u32 tmp;
10670
10671                         /* Clear CRC stats. */
10672                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10673                                 tg3_writephy(tp, MII_TG3_TEST1,
10674                                              tmp | MII_TG3_TEST1_CRC_EN);
10675                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10676                         }
10677                 }
10678         }
10679
10680         __tg3_set_rx_mode(tp->dev);
10681
10682         /* Initialize receive rules. */
10683         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10684         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10685         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10686         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10687
10688         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10689                 limit = 8;
10690         else
10691                 limit = 16;
10692         if (tg3_flag(tp, ENABLE_ASF))
10693                 limit -= 4;
10694         switch (limit) {
10695         case 16:
10696                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10697         case 15:
10698                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10699         case 14:
10700                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10701         case 13:
10702                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10703         case 12:
10704                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10705         case 11:
10706                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10707         case 10:
10708                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10709         case 9:
10710                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10711         case 8:
10712                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10713         case 7:
10714                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10715         case 6:
10716                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10717         case 5:
10718                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10719         case 4:
10720                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10721         case 3:
10722                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10723         case 2:
10724         case 1:
10725
10726         default:
10727                 break;
10728         }
10729
10730         if (tg3_flag(tp, ENABLE_APE))
10731                 /* Write our heartbeat update interval to APE. */
10732                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10733                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10734
10735         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10736
10737         return 0;
10738 }
10739
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	/* Presumably selects a stable core clock source before register
	 * programming -- see tg3_switch_clocks() for the details.
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base before any SRAM accesses
	 * performed during the hardware reset below.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10758
10759 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10760 {
10761         int i;
10762
10763         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10764                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10765
10766                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10767                 off += len;
10768
10769                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10770                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10771                         memset(ocir, 0, TG3_OCIR_LEN);
10772         }
10773 }
10774
10775 /* sysfs attributes for hwmon */
10776 static ssize_t tg3_show_temp(struct device *dev,
10777                              struct device_attribute *devattr, char *buf)
10778 {
10779         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10780         struct tg3 *tp = dev_get_drvdata(dev);
10781         u32 temperature;
10782
10783         spin_lock_bh(&tp->lock);
10784         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10785                                 sizeof(temperature));
10786         spin_unlock_bh(&tp->lock);
10787         return sprintf(buf, "%u\n", temperature * 1000);
10788 }
10789
10790
/* One read-only hwmon attribute per temperature record; the final macro
 * argument is the APE scratchpad offset handed to tg3_show_temp() via
 * attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute table passed (as tg3_groups, built by ATTRIBUTE_GROUPS below)
 * to hwmon_device_register_with_groups() in tg3_hwmon_open().
 */
static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
10805
10806 static void tg3_hwmon_close(struct tg3 *tp)
10807 {
10808         if (tp->hwmon_dev) {
10809                 hwmon_device_unregister(tp->hwmon_dev);
10810                 tp->hwmon_dev = NULL;
10811         }
10812 }
10813
10814 static void tg3_hwmon_open(struct tg3 *tp)
10815 {
10816         int i;
10817         u32 size = 0;
10818         struct pci_dev *pdev = tp->pdev;
10819         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10820
10821         tg3_sd_scan_scratchpad(tp, ocirs);
10822
10823         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10824                 if (!ocirs[i].src_data_length)
10825                         continue;
10826
10827                 size += ocirs[i].src_hdr_length;
10828                 size += ocirs[i].src_data_length;
10829         }
10830
10831         if (!size)
10832                 return;
10833
10834         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10835                                                           tp, tg3_groups);
10836         if (IS_ERR(tp->hwmon_dev)) {
10837                 tp->hwmon_dev = NULL;
10838                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10839         }
10840 }
10841
10842
/* Accumulate the 32-bit hardware counter at REG into the 64-bit statistic
 * PSTAT (a high/low u32 pair), carrying into .high when .low wraps.
 * NOTE: REG is evaluated once, PSTAT multiple times -- pass simple
 * expressions only.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10849
/* Fold the chip's 32-bit MAC/RCVLPC statistics counters into the 64-bit
 * software statistics block.  Called from tg3_timer() with tp->lock held;
 * does nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* Once enough frames have been transmitted, undo the RDMA workaround
	 * that tg3_reset_hw() armed (5719_5720_RDMA_BUG) by clearing the
	 * workaround bit in the LSO read-DMA control register.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips, approximate rx discards from the mbuf
		 * low-watermark attention bit instead: each observed
		 * assertion is acknowledged (write-to-clear, presumably --
		 * TODO confirm) and counted as one discard event.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10915
/* Detect and recover from a missed MSI: if a NAPI vector reports pending
 * work but its rx/tx consumer indices have not moved since the previous
 * timer tick, the interrupt was presumably lost -- invoke tg3_msi()
 * directly to kick NAPI.  One grace tick (chk_msi_cnt) is allowed before
 * declaring the MSI missed.
 *
 * NOTE(review): the grace-period branch returns from the whole function
 * rather than continuing, so vectors after the stalled one are only
 * re-examined on the next timer tick.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or the stall was handled): reset the
		 * grace counter and snapshot the consumer indices.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10938
/* Periodic driver maintenance timer.  Runs every tp->timer_offset jiffies
 * (see tg3_timer_init/tg3_timer_start) and always re-arms itself via the
 * restart_timer label, even when skipping work during irq_sync or a
 * pending reset task.  Under tp->lock it: polls for missed MSIs, services
 * non-tagged IRQ-status races, fetches statistics and polls link state
 * once per second, and sends the ASF heartbeat every
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	/* Skip all work while interrupts are being synchronized or a reset
	 * task is queued, but keep the timer running.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine disabled itself: the chip is wedged,
		 * schedule a full reset.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		/* Link polling: exactly one of the four strategies below is
		 * used, chosen by device flags/PHY type.
		 */
		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode before re-running
				 * PHY setup (only when no serdes autoneg is
				 * in flight).
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
11079
11080 static void tg3_timer_init(struct tg3 *tp)
11081 {
11082         if (tg3_flag(tp, TAGGED_STATUS) &&
11083             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11084             !tg3_flag(tp, 57765_CLASS))
11085                 tp->timer_offset = HZ;
11086         else
11087                 tp->timer_offset = HZ / 10;
11088
11089         BUG_ON(tp->timer_offset > HZ);
11090
11091         tp->timer_multiplier = (HZ / tp->timer_offset);
11092         tp->asf_multiplier = (HZ / tp->timer_offset) *
11093                              TG3_FW_UPDATE_FREQ_SEC;
11094
11095         init_timer(&tp->timer);
11096         tp->timer.data = (unsigned long) tp;
11097         tp->timer.function = tg3_timer;
11098 }
11099
/* Arm the periodic driver timer.  The tick and ASF heartbeat counters
 * are reloaded so the first link poll / heartbeat fires a full period
 * from now.
 */
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
11108
/* Stop the periodic timer and wait for a concurrently running
 * tg3_timer() on another CPU to finish before returning.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
11113
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On tg3_init_hw() failure the chip is halted and the device closed.
 * Note that the lock is dropped and re-taken around the timer stop and
 * dev_close() (see the sparse __releases/__acquires annotations), so
 * callers must not assume lock-protected state is unchanged across
 * this call on the error path.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
11137
/* Workqueue handler that performs a full chip reset after a failure
 * (scheduled via tp->reset_task).  RESET_TASK_PENDING gates scheduling
 * and is what tg3_reset_task_cancel() waits on, so it must be cleared
 * on every exit path.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	rtnl_lock();
	tg3_full_lock(tp, 0);

	/* Device went down in the meantime; nothing to reset. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		rtnl_unlock();
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* TX recovery: switch the TX/RX mailbox writers to the flushed
	 * variants and record that mailbox write reordering must be
	 * assumed from now on.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		/* Clear this flag so that tg3_reset_task_cancel() will not
		 * call cancel_work_sync() and wait forever.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		dev_close(tp->dev);
		goto out;
	}

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
	rtnl_unlock();
}
11193
/* Install the interrupt handler for vector @irq_num.
 *
 * With a single vector the handler is registered under the netdev
 * name; with multiple vectors each one gets a "-txrx-"/"-tx-"/"-rx-"
 * suffixed label stored in tnapi->irq_lbl (snprintf bounds the write
 * to IFNAMSIZ, so very long interface names are silently truncated).
 * MSI/MSI-X vectors are requested exclusive; legacy INTx is shared.
 *
 * Returns the request_irq() result (0 on success).
 */
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		/* snprintf already NUL-terminates; this is belt and braces. */
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
11234
/* Verify that the first interrupt vector actually fires.
 *
 * Temporarily installs tg3_test_isr on vector 0, forces a coalescing
 * "now" event and polls the interrupt mailbox / MISC_HOST_CTRL for up
 * to ~50 ms.  The regular handler is reinstalled before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so an interrupt fires immediately. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero mailbox or a masked PCI interrupt means the
		 * test interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the regular handler regardless of the test outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11308
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * SERR reporting is suppressed for the duration of the test because a
 * failing MSI cycle may terminate with Master Abort.  If MSI delivery
 * fails, MSI is disabled, the vector is re-requested in INTx mode and
 * the chip is reset to clear any Master Abort side effects.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
11369
/* Load the firmware image named in tp->fw_needed into tp->fw.
 *
 * Returns 0 on success (and clears tp->fw_needed), -ENOENT if the
 * blob cannot be obtained, or -EINVAL if its header length field is
 * inconsistent with the loaded size.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	/* The advertised full length (data + BSS) can never be smaller
	 * than the payload that was actually loaded.
	 */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
11400
11401 static u32 tg3_irq_count(struct tg3 *tp)
11402 {
11403         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11404
11405         if (irq_cnt > 1) {
11406                 /* We want as many rx rings enabled as there are cpus.
11407                  * In multiqueue MSI-X mode, the first MSI-X vector
11408                  * only deals with link interrupts, etc, so we add
11409                  * one to the number of vectors we are requesting.
11410                  */
11411                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11412         }
11413
11414         return irq_cnt;
11415 }
11416
/* Try to switch the device to MSI-X.
 *
 * Derives the RX/TX queue counts from the user requests (or the
 * default RSS queue count), asks the PCI core for up to tp->irq_cnt
 * vectors, and scales the queue counts back down if fewer vectors are
 * granted.  Sets ENABLE_RSS/ENABLE_TSS when multiple vectors/queues
 * end up in use.
 *
 * Returns true if MSI-X was enabled, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc < tp->irq_cnt) {
		/* Fewer vectors than requested: shrink the queue counts
		 * to match (vector 0 is reserved for link interrupts).
		 */
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
11475
/* Select the interrupt mode (MSI-X, MSI or legacy INTx) and program
 * MSGINT_MODE accordingly.  Chips without tagged status fall straight
 * through to INTx (all MSI-capable chips are expected to have it).
 * In any single-vector mode the queue counts are forced back to one.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11514
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear the
 * interrupt/RSS/TSS mode flags.  The flags are consulted before being
 * cleared, so the order here matters.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
11526
/* Bring the device fully up: configure interrupts, allocate descriptor
 * memory, register per-vector handlers, initialize the hardware,
 * optionally validate MSI delivery, then start the timer, interrupts
 * and the TX queues.  On failure everything acquired so far is
 * unwound via the out_* labels in reverse order.
 *
 * @reset_phy: passed through to tg3_init_hw().
 * @test_irq:  run the MSI delivery self-test (tg3_test_msi).
 * @init:      notify APE firmware of driver init state.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
11641
/* Full teardown counterpart of tg3_start(): cancel any pending reset
 * task, stop the netif/timer/hwmon/PHY, halt the chip under the full
 * lock, then release the IRQ handlers, interrupt vectors, NAPI
 * contexts and descriptor memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11676
/* ndo_open handler.  Refuses to run during PCI error recovery, loads
 * any required firmware (degrading EEE or TSO capability instead of
 * failing where the chip permits), powers the chip up and delegates
 * the actual bring-up to tg3_start().  On tg3_start() failure the
 * auxiliary power state is restored and the device is put in D3hot.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			/* 57766: firmware only affects EEE; toggle the
			 * capability rather than failing the open.
			 */
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	return err;
}
11733
/* ndo_stop handler.  Rejected while PCI error recovery is in progress.
 * Stops the device, resets the stats baselines used to accumulate
 * counters across close/open cycles and, if the device is still
 * physically present, prepares it for power-down.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
11757
11758 static inline u64 get_stat64(tg3_stat64_t *val)
11759 {
11760        return ((u64)val->high << 32) | ((u64)val->low);
11761 }
11762
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper devices the count is read out of the PHY test
 * registers and accumulated into tp->phy_crc_errors; every other
 * device simply reports the MAC's rx_fcs_errors hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		/* Enable the CRC counter, then read (and thereby clear
		 * into our accumulator) the PHY receive error counter.
		 */
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11786
/* Add a live hardware stat on top of the snapshot saved at the last
 * close (tp->estats_prev), so ethtool counters survive close/open.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with the cumulative ethtool statistics. */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11874
/* Fill @stats with cumulative netdev statistics, combining the
 * pre-close snapshot (tp->net_stats_prev), the live hardware stat
 * block and the driver-maintained rx/tx drop counters.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY rather than the MAC stats
	 * block on some chips; tg3_calc_crc_errors() decides.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11928
11929 static int tg3_get_regs_len(struct net_device *dev)
11930 {
11931         return TG3_REG_BLK_SIZE;
11932 }
11933
/* ethtool get_regs hook: dump the legacy register block into _p.
 * The buffer is zeroed up front so that anything not dumped (or the
 * whole block, when the chip is in low power mode) reads back as zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Touching device registers in low power mode is unsafe; hand
	 * back the zeroed buffer instead.
	 */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11952
11953 static int tg3_get_eeprom_len(struct net_device *dev)
11954 {
11955         struct tg3 *tp = netdev_priv(dev);
11956
11957         return tp->nvram_size;
11958 }
11959
/* ethtool get_eeprom hook: read an arbitrary byte range out of NVRAM.
 *
 * NVRAM is only addressable in aligned 4-byte big-endian words, so the
 * request is split into an unaligned head, a run of whole words, and an
 * unaligned tail.  eeprom->len is advanced incrementally so a partial
 * read still reports exactly how many bytes were copied to 'data'.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Don't count the word that failed; report only
			 * the bytes successfully copied so far.
			 */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* NVRAM reads can be slow; yield the CPU periodically and
		 * let a pending signal abort the transfer.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12050
12051 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12052 {
12053         struct tg3 *tp = netdev_priv(dev);
12054         int ret;
12055         u32 offset, len, b_offset, odd_len;
12056         u8 *buf;
12057         __be32 start = 0, end;
12058
12059         if (tg3_flag(tp, NO_NVRAM) ||
12060             eeprom->magic != TG3_EEPROM_MAGIC)
12061                 return -EINVAL;
12062
12063         offset = eeprom->offset;
12064         len = eeprom->len;
12065
12066         if ((b_offset = (offset & 3))) {
12067                 /* adjustments to start on required 4 byte boundary */
12068                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12069                 if (ret)
12070                         return ret;
12071                 len += b_offset;
12072                 offset &= ~3;
12073                 if (len < 4)
12074                         len = 4;
12075         }
12076
12077         odd_len = 0;
12078         if (len & 3) {
12079                 /* adjustments to end on required 4 byte boundary */
12080                 odd_len = 1;
12081                 len = (len + 3) & ~3;
12082                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12083                 if (ret)
12084                         return ret;
12085         }
12086
12087         buf = data;
12088         if (b_offset || odd_len) {
12089                 buf = kmalloc(len, GFP_KERNEL);
12090                 if (!buf)
12091                         return -ENOMEM;
12092                 if (b_offset)
12093                         memcpy(buf, &start, 4);
12094                 if (odd_len)
12095                         memcpy(buf+len-4, &end, 4);
12096                 memcpy(buf + b_offset, data, eeprom->len);
12097         }
12098
12099         ret = tg3_nvram_write_block(tp, offset, len, buf);
12100
12101         if (buf != data)
12102                 kfree(buf);
12103
12104         return ret;
12105 }
12106
/* ethtool get_settings hook: report supported/advertised modes and the
 * current link state.  When phylib manages the PHY, the query is simply
 * forwarded to it.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add the 10/100 TP modes; SERDES links are fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	/* Translate the rx/tx flow control config into the pause
	 * advertisement bit combination defined by IEEE 802.3 aneg.
	 */
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	/* Live link parameters are only meaningful while the link is up. */
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
12172
/* ethtool set_settings hook: validate and apply a new link configuration
 * (autoneg on/off, advertised modes, forced speed/duplex).  When phylib
 * manages the PHY the request is forwarded to it.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this hardware can legally
		 * advertise, then reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for the stored config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SERDES links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Speed/duplex will be negotiated, not forced. */
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12263
12264 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12265 {
12266         struct tg3 *tp = netdev_priv(dev);
12267
12268         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12269         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12270         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12271         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12272 }
12273
12274 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12275 {
12276         struct tg3 *tp = netdev_priv(dev);
12277
12278         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12279                 wol->supported = WAKE_MAGIC;
12280         else
12281                 wol->supported = 0;
12282         wol->wolopts = 0;
12283         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12284                 wol->wolopts = WAKE_MAGIC;
12285         memset(&wol->sopass, 0, sizeof(wol->sopass));
12286 }
12287
12288 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12289 {
12290         struct tg3 *tp = netdev_priv(dev);
12291         struct device *dp = &tp->pdev->dev;
12292
12293         if (wol->wolopts & ~WAKE_MAGIC)
12294                 return -EINVAL;
12295         if ((wol->wolopts & WAKE_MAGIC) &&
12296             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12297                 return -EINVAL;
12298
12299         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12300
12301         if (device_may_wakeup(dp))
12302                 tg3_flag_set(tp, WOL_ENABLE);
12303         else
12304                 tg3_flag_clear(tp, WOL_ENABLE);
12305
12306         return 0;
12307 }
12308
12309 static u32 tg3_get_msglevel(struct net_device *dev)
12310 {
12311         struct tg3 *tp = netdev_priv(dev);
12312         return tp->msg_enable;
12313 }
12314
12315 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12316 {
12317         struct tg3 *tp = netdev_priv(dev);
12318         tp->msg_enable = value;
12319 }
12320
/* ethtool nway_reset hook: restart link autonegotiation. */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): the first read below looks like a deliberate
		 * dummy read before the checked read that follows —
		 * presumably to flush latched PHY state; confirm before
		 * "simplifying" it away.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Only restart aneg if it is already enabled (or
			 * parallel detection is active).
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12356
12357 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12358 {
12359         struct tg3 *tp = netdev_priv(dev);
12360
12361         ering->rx_max_pending = tp->rx_std_ring_mask;
12362         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12363                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12364         else
12365                 ering->rx_jumbo_max_pending = 0;
12366
12367         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12368
12369         ering->rx_pending = tp->rx_pending;
12370         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12371                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12372         else
12373                 ering->rx_jumbo_pending = 0;
12374
12375         ering->tx_pending = tp->napi[0].tx_pending;
12376 }
12377
/* ethtool set_ringparam hook: resize the rx/tx rings.  Requires a full
 * device halt and restart when the interface is up.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	/* The tx ring must hold at least one maximally fragmented skb;
	 * TSO_BUG chips need extra headroom for the segmentation
	 * workaround.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device before touching ring configuration. */
	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Apply the tx ring size to every queue vector. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12432
12433 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12434 {
12435         struct tg3 *tp = netdev_priv(dev);
12436
12437         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12438
12439         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12440                 epause->rx_pause = 1;
12441         else
12442                 epause->rx_pause = 0;
12443
12444         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12445                 epause->tx_pause = 1;
12446         else
12447                 epause->tx_pause = 0;
12448 }
12449
/* ethtool set_pauseparam hook: configure rx/tx flow control.  The phylib
 * path updates the PHY's pause advertisement (possibly restarting aneg);
 * the native path reprograms the MAC, restarting the hardware if the
 * interface is up.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[tp->phy_addr];

		/* Asymmetric rx/tx settings need asym-pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx request onto the IEEE 802.3 pause
		 * advertisement bit combination.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* A running device must be halted and restarted for the
		 * new flow control settings to take effect.
		 */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12563
12564 static int tg3_get_sset_count(struct net_device *dev, int sset)
12565 {
12566         switch (sset) {
12567         case ETH_SS_TEST:
12568                 return TG3_NUM_TEST;
12569         case ETH_SS_STATS:
12570                 return TG3_NUM_STATS;
12571         default:
12572                 return -EOPNOTSUPP;
12573         }
12574 }
12575
12576 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12577                          u32 *rules __always_unused)
12578 {
12579         struct tg3 *tp = netdev_priv(dev);
12580
12581         if (!tg3_flag(tp, SUPPORT_MSIX))
12582                 return -EOPNOTSUPP;
12583
12584         switch (info->cmd) {
12585         case ETHTOOL_GRXRINGS:
12586                 if (netif_running(tp->dev))
12587                         info->data = tp->rxq_cnt;
12588                 else {
12589                         info->data = num_online_cpus();
12590                         if (info->data > TG3_RSS_MAX_NUM_QS)
12591                                 info->data = TG3_RSS_MAX_NUM_QS;
12592                 }
12593
12594                 /* The first interrupt vector only
12595                  * handles link interrupts.
12596                  */
12597                 info->data -= 1;
12598                 return 0;
12599
12600         default:
12601                 return -EOPNOTSUPP;
12602         }
12603 }
12604
12605 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12606 {
12607         u32 size = 0;
12608         struct tg3 *tp = netdev_priv(dev);
12609
12610         if (tg3_flag(tp, SUPPORT_MSIX))
12611                 size = TG3_RSS_INDIR_TBL_SIZE;
12612
12613         return size;
12614 }
12615
12616 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12617 {
12618         struct tg3 *tp = netdev_priv(dev);
12619         int i;
12620
12621         if (hfunc)
12622                 *hfunc = ETH_RSS_HASH_TOP;
12623         if (!indir)
12624                 return 0;
12625
12626         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12627                 indir[i] = tp->rss_ind_tbl[i];
12628
12629         return 0;
12630 }
12631
12632 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12633                         const u8 hfunc)
12634 {
12635         struct tg3 *tp = netdev_priv(dev);
12636         size_t i;
12637
12638         /* We require at least one supported parameter to be changed and no
12639          * change in any of the unsupported parameters
12640          */
12641         if (key ||
12642             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12643                 return -EOPNOTSUPP;
12644
12645         if (!indir)
12646                 return 0;
12647
12648         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12649                 tp->rss_ind_tbl[i] = indir[i];
12650
12651         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12652                 return 0;
12653
12654         /* It is legal to write the indirection
12655          * table while the device is running.
12656          */
12657         tg3_full_lock(tp, 0);
12658         tg3_rss_write_indir_tbl(tp);
12659         tg3_full_unlock(tp);
12660
12661         return 0;
12662 }
12663
12664 static void tg3_get_channels(struct net_device *dev,
12665                              struct ethtool_channels *channel)
12666 {
12667         struct tg3 *tp = netdev_priv(dev);
12668         u32 deflt_qs = netif_get_num_default_rss_queues();
12669
12670         channel->max_rx = tp->rxq_max;
12671         channel->max_tx = tp->txq_max;
12672
12673         if (netif_running(dev)) {
12674                 channel->rx_count = tp->rxq_cnt;
12675                 channel->tx_count = tp->txq_cnt;
12676         } else {
12677                 if (tp->rxq_req)
12678                         channel->rx_count = tp->rxq_req;
12679                 else
12680                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12681
12682                 if (tp->txq_req)
12683                         channel->tx_count = tp->txq_req;
12684                 else
12685                         channel->tx_count = min(deflt_qs, tp->txq_max);
12686         }
12687 }
12688
12689 static int tg3_set_channels(struct net_device *dev,
12690                             struct ethtool_channels *channel)
12691 {
12692         struct tg3 *tp = netdev_priv(dev);
12693
12694         if (!tg3_flag(tp, SUPPORT_MSIX))
12695                 return -EOPNOTSUPP;
12696
12697         if (channel->rx_count > tp->rxq_max ||
12698             channel->tx_count > tp->txq_max)
12699                 return -EINVAL;
12700
12701         tp->rxq_req = channel->rx_count;
12702         tp->txq_req = channel->tx_count;
12703
12704         if (!netif_running(dev))
12705                 return 0;
12706
12707         tg3_stop(tp);
12708
12709         tg3_carrier_off(tp);
12710
12711         tg3_start(tp, true, false, false);
12712
12713         return 0;
12714 }
12715
12716 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12717 {
12718         switch (stringset) {
12719         case ETH_SS_STATS:
12720                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12721                 break;
12722         case ETH_SS_TEST:
12723                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12724                 break;
12725         default:
12726                 WARN_ON(1);     /* we need a WARN() */
12727                 break;
12728         }
12729 }
12730
/* ethtool set_phys_id hook: blink the port LEDs so the user can identify
 * the physical device.  Returning 1 from ETHTOOL_ID_ACTIVE asks the core
 * to call back once per second, alternating ID_ON/ID_OFF until stopped.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every speed LED and the traffic LED on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the normal hardware-driven LED configuration. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
12765
12766 static void tg3_get_ethtool_stats(struct net_device *dev,
12767                                    struct ethtool_stats *estats, u64 *tmp_stats)
12768 {
12769         struct tg3 *tp = netdev_priv(dev);
12770
12771         if (tp->hw_stats)
12772                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12773         else
12774                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12775 }
12776
/* Read the Vital Product Data block from NVRAM.
 *
 * For legacy EEPROM-format images the NVM directory is scanned for an
 * extended-VPD entry; otherwise (or when none is found) the fixed VPD
 * region is used.  Non-EEPROM images are read through the PCI VPD
 * capability instead of the NVRAM interface.
 *
 * On success returns a kmalloc()ed buffer holding the VPD data and
 * stores its length in *vpdlen; the caller must kfree() the buffer.
 * Returns NULL when NVRAM is absent or on any read/alloc failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVM directory looking for an extended-VPD
		 * directory entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is stored in 4-byte units;
			 * the payload address is in the following word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD entry: fall back to the fixed region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		/* Read via the PCI VPD capability in at most three
		 * attempts; -ETIMEDOUT/-EINTR only retry the same
		 * region (cnt forced to 0), any other error aborts.
		 */
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12852
/* Expected image sizes (bytes) for the NVRAM formats handled below. */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool NVRAM self test: read the image and verify its integrity.
 *
 * Three layouts are handled, selected by the magic word at offset 0:
 *  - firmware self-boot images: 8-bit sum over the image must be 0
 *    (revision 2 excludes the 4-byte MBA field from the sum);
 *  - hardware self-boot images: per-byte parity bits are checked;
 *  - legacy EEPROM images: CRC of the bootstrap block (checksum at
 *    0x10) and of the manufacturing block (checksum at 0xfc), plus the
 *    VPD read-only section checksum keyword.
 *
 * Returns 0 on success, -EIO on a failed check or read error,
 * -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Nothing to check when the device has no NVRAM. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how much data to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown self-boot format: skip the test. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image into buf, 32 bits at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  The
		 * bytes at offsets 0, 8 and 16 hold packed parity bits
		 * for the surrounding data bytes.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte combined with its parity bit must have
		 * odd parity overall.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the checksum keyword in the VPD RO section. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of VPD through the stored
			 * checksum byte; must be zero mod 256.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
13041
13042 #define TG3_SERDES_TIMEOUT_SEC  2
13043 #define TG3_COPPER_TIMEOUT_SEC  6
13044
13045 static int tg3_test_link(struct tg3 *tp)
13046 {
13047         int i, max;
13048
13049         if (!netif_running(tp->dev))
13050                 return -ENODEV;
13051
13052         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13053                 max = TG3_SERDES_TIMEOUT_SEC;
13054         else
13055                 max = TG3_COPPER_TIMEOUT_SEC;
13056
13057         for (i = 0; i < max; i++) {
13058                 if (tp->link_up)
13059                         return 0;
13060
13061                 if (msleep_interruptible(1000))
13062                         break;
13063         }
13064
13065         return -EIO;
13066 }
13067
13068 /* Only test the commonly used registers */
/* ethtool register self test.  Only the commonly used registers are
 * exercised: for each table entry the test writes all-zeros and then
 * all-ones through the writable mask, verifying that read-only bits
 * keep their value and read/write bits take exactly what was written.
 * The original register contents are restored in all cases.
 * Returns 0 on success, -EIO (with the failing offset logged) on
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Per-entry applicability flags, matched against the chip generation. */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip generation once so table entries can be
	 * filtered by their flags.
	 */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Restore the register before reporting the failing offset. */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
13288
13289 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13290 {
13291         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13292         int i;
13293         u32 j;
13294
13295         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13296                 for (j = 0; j < len; j += 4) {
13297                         u32 val;
13298
13299                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13300                         tg3_read_mem(tp, offset + j, &val);
13301                         if (val != test_pattern[i])
13302                                 return -EIO;
13303                 }
13304         }
13305         return 0;
13306 }
13307
/* ethtool memory self test: pattern-test the chip-internal memory
 * regions.  The region table is selected by ASIC generation; each
 * entry is an {offset, len} pair, terminated by offset 0xffffffff.
 * Returns 0 on success or the first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this chip family; checked from
	 * newest to oldest so the most specific table wins.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
13377
/* Parameters for the TSO loopback self test: segment size and the
 * header lengths used to build the template below.
 */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned header template for the TSO loopback test frame.  It is
 * copied into the packet immediately after the two MAC addresses, so
 * the first two bytes land in the EtherType position, followed by a
 * 20-byte IPv4 header and a 20+12 byte TCP header with options
 * (lengths per the TG3_TSO_* constants above).  NOTE(review): field
 * meanings inferred from those constants and the ETH_ALEN * 2 copy
 * offset in the loopback code — confirm against tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
13400
13401 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13402 {
13403         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13404         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13405         u32 budget;
13406         struct sk_buff *skb;
13407         u8 *tx_data, *rx_data;
13408         dma_addr_t map;
13409         int num_pkts, tx_len, rx_len, i, err;
13410         struct tg3_rx_buffer_desc *desc;
13411         struct tg3_napi *tnapi, *rnapi;
13412         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13413
13414         tnapi = &tp->napi[0];
13415         rnapi = &tp->napi[0];
13416         if (tp->irq_cnt > 1) {
13417                 if (tg3_flag(tp, ENABLE_RSS))
13418                         rnapi = &tp->napi[1];
13419                 if (tg3_flag(tp, ENABLE_TSS))
13420                         tnapi = &tp->napi[1];
13421         }
13422         coal_now = tnapi->coal_now | rnapi->coal_now;
13423
13424         err = -EIO;
13425
13426         tx_len = pktsz;
13427         skb = netdev_alloc_skb(tp->dev, tx_len);
13428         if (!skb)
13429                 return -ENOMEM;
13430
13431         tx_data = skb_put(skb, tx_len);
13432         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13433         memset(tx_data + ETH_ALEN, 0x0, 8);
13434
13435         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13436
13437         if (tso_loopback) {
13438                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13439
13440                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13441                               TG3_TSO_TCP_OPT_LEN;
13442
13443                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13444                        sizeof(tg3_tso_header));
13445                 mss = TG3_TSO_MSS;
13446
13447                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13448                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13449
13450                 /* Set the total length field in the IP header */
13451                 iph->tot_len = htons((u16)(mss + hdr_len));
13452
13453                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13454                               TXD_FLAG_CPU_POST_DMA);
13455
13456                 if (tg3_flag(tp, HW_TSO_1) ||
13457                     tg3_flag(tp, HW_TSO_2) ||
13458                     tg3_flag(tp, HW_TSO_3)) {
13459                         struct tcphdr *th;
13460                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13461                         th = (struct tcphdr *)&tx_data[val];
13462                         th->check = 0;
13463                 } else
13464                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13465
13466                 if (tg3_flag(tp, HW_TSO_3)) {
13467                         mss |= (hdr_len & 0xc) << 12;
13468                         if (hdr_len & 0x10)
13469                                 base_flags |= 0x00000010;
13470                         base_flags |= (hdr_len & 0x3e0) << 5;
13471                 } else if (tg3_flag(tp, HW_TSO_2))
13472                         mss |= hdr_len << 9;
13473                 else if (tg3_flag(tp, HW_TSO_1) ||
13474                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13475                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13476                 } else {
13477                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13478                 }
13479
13480                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13481         } else {
13482                 num_pkts = 1;
13483                 data_off = ETH_HLEN;
13484
13485                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13486                     tx_len > VLAN_ETH_FRAME_LEN)
13487                         base_flags |= TXD_FLAG_JMB_PKT;
13488         }
13489
13490         for (i = data_off; i < tx_len; i++)
13491                 tx_data[i] = (u8) (i & 0xff);
13492
13493         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13494         if (pci_dma_mapping_error(tp->pdev, map)) {
13495                 dev_kfree_skb(skb);
13496                 return -EIO;
13497         }
13498
13499         val = tnapi->tx_prod;
13500         tnapi->tx_buffers[val].skb = skb;
13501         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13502
13503         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13504                rnapi->coal_now);
13505
13506         udelay(10);
13507
13508         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13509
13510         budget = tg3_tx_avail(tnapi);
13511         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13512                             base_flags | TXD_FLAG_END, mss, 0)) {
13513                 tnapi->tx_buffers[val].skb = NULL;
13514                 dev_kfree_skb(skb);
13515                 return -EIO;
13516         }
13517
13518         tnapi->tx_prod++;
13519
13520         /* Sync BD data before updating mailbox */
13521         wmb();
13522
13523         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13524         tr32_mailbox(tnapi->prodmbox);
13525
13526         udelay(10);
13527
13528         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13529         for (i = 0; i < 35; i++) {
13530                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13531                        coal_now);
13532
13533                 udelay(10);
13534
13535                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13536                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13537                 if ((tx_idx == tnapi->tx_prod) &&
13538                     (rx_idx == (rx_start_idx + num_pkts)))
13539                         break;
13540         }
13541
13542         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13543         dev_kfree_skb(skb);
13544
13545         if (tx_idx != tnapi->tx_prod)
13546                 goto out;
13547
13548         if (rx_idx != rx_start_idx + num_pkts)
13549                 goto out;
13550
13551         val = data_off;
13552         while (rx_idx != rx_start_idx) {
13553                 desc = &rnapi->rx_rcb[rx_start_idx++];
13554                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13555                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13556
13557                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13558                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13559                         goto out;
13560
13561                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13562                          - ETH_FCS_LEN;
13563
13564                 if (!tso_loopback) {
13565                         if (rx_len != tx_len)
13566                                 goto out;
13567
13568                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13569                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13570                                         goto out;
13571                         } else {
13572                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13573                                         goto out;
13574                         }
13575                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13576                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13577                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13578                         goto out;
13579                 }
13580
13581                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13582                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13583                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13584                                              mapping);
13585                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13586                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13587                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13588                                              mapping);
13589                 } else
13590                         goto out;
13591
13592                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13593                                             PCI_DMA_FROMDEVICE);
13594
13595                 rx_data += TG3_RX_OFFSET(tp);
13596                 for (i = data_off; i < rx_len; i++, val++) {
13597                         if (*(rx_data + i) != (u8) (val & 0xff))
13598                                 goto out;
13599                 }
13600         }
13601
13602         err = 0;
13603
13604         /* tg3_free_rings will unmap and free the rx_data */
13605 out:
13606         return err;
13607 }
13608
/* Per-mode failure bits OR'd into the ethtool loopback test results
 * (data[TG3_MAC_LOOPB_TEST] etc.) by tg3_test_loopback().
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
13616
/* Run the loopback self-tests: internal MAC loopback, internal PHY
 * loopback and, when @do_extlpbk is set, external PHY loopback.
 * Failure bits (TG3_*_LOOPBACK_FAILED) are OR'd into
 * data[TG3_MAC_LOOPB_TEST], data[TG3_PHY_LOOPB_TEST] and
 * data[TG3_EXT_LOOPB_TEST].
 *
 * NOTE(review): the final err computation reads data[TG3_EXT_LOOPB_TEST]
 * even when external loopback was not run, so the caller must have
 * zeroed data[] beforehand (tg3_self_test does).
 *
 * Returns 0 when all executed loopback tests passed, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test packet to the device's DMA limit, if any. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Save and clear the EEE capability flag for the duration of the
	 * test (presumably so EEE low-power idle cannot disturb the
	 * loopback traffic — confirm); it is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Interface down: report every loopback mode as failed. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	/* A failed hardware reset likewise fails every mode. */
	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback only runs when this driver manages the PHY itself
	 * (no serdes, no phylib).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Overall verdict: any recorded failure bit maps to -EIO. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13731
/* Ethtool self-test entry point.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, recording per-test results in data[] and
 * OR-ing ETH_TEST_FL_FAILED into etest->flags on any failure.  Offline
 * tests halt the hardware and restart it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Power the chip up first; if that fails, mark every test
		 * result non-zero (each byte of data[] is set to 1).
		 */
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* The link test is skipped for external loopback, which runs with
	 * no partner attached.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* tg3_test_loopback() fills in the per-mode data[] slots
		 * itself.
		 */
		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Lock is dropped around the interrupt test — NOTE(review):
		 * presumably because it needs the interrupt path live;
		 * confirm before relying on this.
		 */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13820
13821 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13822 {
13823         struct tg3 *tp = netdev_priv(dev);
13824         struct hwtstamp_config stmpconf;
13825
13826         if (!tg3_flag(tp, PTP_CAPABLE))
13827                 return -EOPNOTSUPP;
13828
13829         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13830                 return -EFAULT;
13831
13832         if (stmpconf.flags)
13833                 return -EINVAL;
13834
13835         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13836             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13837                 return -ERANGE;
13838
13839         switch (stmpconf.rx_filter) {
13840         case HWTSTAMP_FILTER_NONE:
13841                 tp->rxptpctl = 0;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13845                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13849                                TG3_RX_PTP_CTL_SYNC_EVNT;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13853                                TG3_RX_PTP_CTL_DELAY_REQ;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13858                 break;
13859         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13862                 break;
13863         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13866                 break;
13867         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869                                TG3_RX_PTP_CTL_SYNC_EVNT;
13870                 break;
13871         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873                                TG3_RX_PTP_CTL_SYNC_EVNT;
13874                 break;
13875         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877                                TG3_RX_PTP_CTL_SYNC_EVNT;
13878                 break;
13879         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881                                TG3_RX_PTP_CTL_DELAY_REQ;
13882                 break;
13883         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885                                TG3_RX_PTP_CTL_DELAY_REQ;
13886                 break;
13887         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889                                TG3_RX_PTP_CTL_DELAY_REQ;
13890                 break;
13891         default:
13892                 return -ERANGE;
13893         }
13894
13895         if (netif_running(dev) && tp->rxptpctl)
13896                 tw32(TG3_RX_PTP_CTL,
13897                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13898
13899         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13900                 tg3_flag_set(tp, TX_TSTAMP_EN);
13901         else
13902                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13903
13904         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13905                 -EFAULT : 0;
13906 }
13907
13908 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13909 {
13910         struct tg3 *tp = netdev_priv(dev);
13911         struct hwtstamp_config stmpconf;
13912
13913         if (!tg3_flag(tp, PTP_CAPABLE))
13914                 return -EOPNOTSUPP;
13915
13916         stmpconf.flags = 0;
13917         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13918                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13919
13920         switch (tp->rxptpctl) {
13921         case 0:
13922                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13923                 break;
13924         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13925                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13926                 break;
13927         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13935                 break;
13936         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13937                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13938                 break;
13939         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13940                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13941                 break;
13942         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13943                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13944                 break;
13945         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13947                 break;
13948         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13959                 break;
13960         default:
13961                 WARN_ON_ONCE(1);
13962                 return -ERANGE;
13963         }
13964
13965         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13966                 -EFAULT : 0;
13967 }
13968
/* ndo_do_ioctl handler: services MII PHY register ioctls and hardware
 * timestamp configuration (SIOCSHWTSTAMP / SIOCGHWTSTAMP).  When phylib
 * manages the PHY, MII ioctls are delegated to it wholesale.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* Not connected to a PHY yet — caller should retry. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		/* Report our PHY address, then read the register too. */
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize PHY access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
14033
14034 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14035 {
14036         struct tg3 *tp = netdev_priv(dev);
14037
14038         memcpy(ec, &tp->coal, sizeof(*ec));
14039         return 0;
14040 }
14041
14042 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14043 {
14044         struct tg3 *tp = netdev_priv(dev);
14045         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14046         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14047
14048         if (!tg3_flag(tp, 5705_PLUS)) {
14049                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14050                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14051                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14052                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14053         }
14054
14055         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14056             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14057             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14058             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14059             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14060             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14061             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14062             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14063             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14064             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14065                 return -EINVAL;
14066
14067         /* No rx interrupts will be generated if both are zero */
14068         if ((ec->rx_coalesce_usecs == 0) &&
14069             (ec->rx_max_coalesced_frames == 0))
14070                 return -EINVAL;
14071
14072         /* No tx interrupts will be generated if both are zero */
14073         if ((ec->tx_coalesce_usecs == 0) &&
14074             (ec->tx_max_coalesced_frames == 0))
14075                 return -EINVAL;
14076
14077         /* Only copy relevant parameters, ignore all others. */
14078         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14079         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14080         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14081         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14082         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14083         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14084         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14085         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14086         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14087
14088         if (netif_running(dev)) {
14089                 tg3_full_lock(tp, 0);
14090                 __tg3_set_coalesce(tp, &tp->coal);
14091                 tg3_full_unlock(tp);
14092         }
14093         return 0;
14094 }
14095
14096 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14097 {
14098         struct tg3 *tp = netdev_priv(dev);
14099
14100         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14101                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14102                 return -EOPNOTSUPP;
14103         }
14104
14105         if (edata->advertised != tp->eee.advertised) {
14106                 netdev_warn(tp->dev,
14107                             "Direct manipulation of EEE advertisement is not supported\n");
14108                 return -EINVAL;
14109         }
14110
14111         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14112                 netdev_warn(tp->dev,
14113                             "Maximal Tx Lpi timer supported is %#x(u)\n",
14114                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14115                 return -EINVAL;
14116         }
14117
14118         tp->eee = *edata;
14119
14120         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14121         tg3_warn_mgmt_link_flap(tp);
14122
14123         if (netif_running(tp->dev)) {
14124                 tg3_full_lock(tp, 0);
14125                 tg3_setup_eee(tp);
14126                 tg3_phy_reset(tp);
14127                 tg3_full_unlock(tp);
14128         }
14129
14130         return 0;
14131 }
14132
14133 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14134 {
14135         struct tg3 *tp = netdev_priv(dev);
14136
14137         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14138                 netdev_warn(tp->dev,
14139                             "Board does not support EEE!\n");
14140                 return -EOPNOTSUPP;
14141         }
14142
14143         *edata = tp->eee;
14144         return 0;
14145 }
14146
/* ethtool operations supported by the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
14183
14184 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14185                                                 struct rtnl_link_stats64 *stats)
14186 {
14187         struct tg3 *tp = netdev_priv(dev);
14188
14189         spin_lock_bh(&tp->lock);
14190         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14191                 *stats = tp->net_stats_prev;
14192                 spin_unlock_bh(&tp->lock);
14193                 return stats;
14194         }
14195
14196         tg3_get_nstats(tp, stats);
14197         spin_unlock_bh(&tp->lock);
14198
14199         return stats;
14200 }
14201
/* ndo_set_rx_mode handler: reprogram the rx filters under the full
 * lock; nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
14213
14214 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14215                                int new_mtu)
14216 {
14217         dev->mtu = new_mtu;
14218
14219         if (new_mtu > ETH_DATA_LEN) {
14220                 if (tg3_flag(tp, 5780_CLASS)) {
14221                         netdev_update_features(dev);
14222                         tg3_flag_clear(tp, TSO_CAPABLE);
14223                 } else {
14224                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14225                 }
14226         } else {
14227                 if (tg3_flag(tp, 5780_CLASS)) {
14228                         tg3_flag_set(tp, TSO_CAPABLE);
14229                         netdev_update_features(dev);
14230                 }
14231                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14232         }
14233 }
14234
/* ndo_change_mtu handler: validate the requested MTU, update the
 * jumbo/TSO flags via tg3_set_mtu() and, if the interface is up,
 * halt and restart the hardware so the new size takes effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce the PHY and the data path before reconfiguring. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only once the hardware came back cleanly. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
14283
/* net_device_ops for tg3 interfaces; wired up at probe time. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
14301
14302 static void tg3_get_eeprom_size(struct tg3 *tp)
14303 {
14304         u32 cursize, val, magic;
14305
14306         tp->nvram_size = EEPROM_CHIP_SIZE;
14307
14308         if (tg3_nvram_read(tp, 0, &magic) != 0)
14309                 return;
14310
14311         if ((magic != TG3_EEPROM_MAGIC) &&
14312             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14313             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14314                 return;
14315
14316         /*
14317          * Size the chip by reading offsets at increasing powers of two.
14318          * When we encounter our validation signature, we know the addressing
14319          * has wrapped around, and thus have our chip size.
14320          */
14321         cursize = 0x10;
14322
14323         while (cursize < tp->nvram_size) {
14324                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14325                         return;
14326
14327                 if (val == magic)
14328                         break;
14329
14330                 cursize <<= 1;
14331         }
14332
14333         tp->nvram_size = cursize;
14334 }
14335
14336 static void tg3_get_nvram_size(struct tg3 *tp)
14337 {
14338         u32 val;
14339
14340         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14341                 return;
14342
14343         /* Selfboot format */
14344         if (val != TG3_EEPROM_MAGIC) {
14345                 tg3_get_eeprom_size(tp);
14346                 return;
14347         }
14348
14349         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14350                 if (val != 0) {
14351                         /* This is confusing.  We want to operate on the
14352                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14353                          * call will read from NVRAM and byteswap the data
14354                          * according to the byteswapping settings for all
14355                          * other register accesses.  This ensures the data we
14356                          * want will always reside in the lower 16-bits.
14357                          * However, the data in NVRAM is in LE format, which
14358                          * means the data from the NVRAM read will always be
14359                          * opposite the endianness of the CPU.  The 16-bit
14360                          * byteswap then brings the data to CPU endianness.
14361                          */
14362                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14363                         return;
14364                 }
14365         }
14366         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14367 }
14368
/* Decode NVRAM_CFG1 for pre-5752 devices.
 *
 * 5750 and 5780-class chips carry a vendor strap field that selects
 * the JEDEC vendor and page size; all other chips default to a
 * buffered Atmel AT45DB0X1B part.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface strapped: clear the compat bypass so
		 * accesses go through the normal EEPROM state machine.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Default for chips without the vendor strap field. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14419
14420 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14421 {
14422         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14423         case FLASH_5752PAGE_SIZE_256:
14424                 tp->nvram_pagesize = 256;
14425                 break;
14426         case FLASH_5752PAGE_SIZE_512:
14427                 tp->nvram_pagesize = 512;
14428                 break;
14429         case FLASH_5752PAGE_SIZE_1K:
14430                 tp->nvram_pagesize = 1024;
14431                 break;
14432         case FLASH_5752PAGE_SIZE_2K:
14433                 tp->nvram_pagesize = 2048;
14434                 break;
14435         case FLASH_5752PAGE_SIZE_4K:
14436                 tp->nvram_pagesize = 4096;
14437                 break;
14438         case FLASH_5752PAGE_SIZE_264:
14439                 tp->nvram_pagesize = 264;
14440                 break;
14441         case FLASH_5752PAGE_SIZE_528:
14442                 tp->nvram_pagesize = 528;
14443                 break;
14444         }
14445 }
14446
/* Decode NVRAM_CFG1 vendor straps for 5752 chips and set the JEDEC
 * vendor, buffering and page-size state accordingly.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path must not bypass the access state machine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14487
/* Decode NVRAM_CFG1 vendor straps for 5755 chips.
 *
 * Besides the vendor/page-size decode, the per-part NVRAM size is
 * derived from the strap value; when the TPM protection bit is set the
 * usable size is smaller than the physical part size.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14543
/* Decode NVRAM_CFG1 vendor straps for 5787 (and 5784/5785) chips. */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path must not bypass the access state machine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14581
/* Decode NVRAM_CFG1 vendor straps for 5761 chips.
 *
 * When TPM protection is active the usable size comes from the
 * NVRAM_ADDR_LOCKOUT register; otherwise it is derived from the
 * strapped part type.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected parts expose the accessible size directly. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14656
14657 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14658 {
14659         tp->nvram_jedecnum = JEDEC_ATMEL;
14660         tg3_flag_set(tp, NVRAM_BUFFERED);
14661         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14662 }
14663
/* Decode NVRAM_CFG1 vendor straps for 57780 (and 57765-class) chips.
 *
 * EEPROM straps return early after disabling compat bypass; flash
 * straps fall through to the common page-size decode at the bottom.
 * Unknown straps mark the device as having no usable NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch picks the size for the specific part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only needed for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14735
14736
/* Decode NVRAM_CFG1 vendor straps for 5717/5719 chips.
 *
 * EEPROM straps return early; Atmel and ST flash straps set vendor and
 * size (some straps leave size 0 so it is probed later by the generic
 * size detection), then fall through to the page-size decode.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only needed for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14814
/* Decode NVRAM_CFG1 vendor straps for 5720 and 5762 chips.
 *
 * 5762 pinstraps are first remapped onto their 5720 equivalents so one
 * decode table serves both; afterwards 5762 additionally validates the
 * NVRAM magic and disables NVRAM access if it is unrecognized.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Remap 5762-specific straps to the shared 5720 values. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* 5762 leaves size 0 so it is probed later. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* 5762 leaves size 0 so it is probed later. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only needed for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Unrecognized image magic: treat as no usable NVRAM. */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14961
14962 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Initialize NVRAM access state: reset the EEPROM state machine,
 * enable serial-EEPROM accesses, then (under the NVRAM lock) dispatch
 * to the per-ASIC strap decoder and size the device.  5700/5701 and
 * SSB cores never use the NVRAM interface.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and set its clock. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* All NVRAM probing below must hold the hardware lock. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Decoders that left size 0 need an explicit size probe. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain serial EEPROM only. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15036
/* One row of the PCI-subsystem-ID -> PHY ID fallback table used when
 * no PHY ID can be obtained from the chip or the EEPROM.  A phy_id of
 * 0 marks boards whose PHY is a serdes device (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
15041
/* Hard-coded PHY IDs for known boards, keyed by PCI subsystem
 * vendor/device ID.  Consulted by tg3_lookup_by_subsys() as a last
 * resort when neither the hardware nor the EEPROM yields a PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15105
15106 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15107 {
15108         int i;
15109
15110         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15111                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15112                      tp->pdev->subsystem_vendor) &&
15113                     (subsys_id_to_phy_id[i].subsys_devid ==
15114                      tp->pdev->subsystem_device))
15115                         return &subsys_id_to_phy_id[i];
15116         }
15117         return NULL;
15118 }
15119
/* Decode the bootcode-provided hardware configuration from NIC SRAM
 * (or, on 5906, from the VCPU shadow register) into tp flags: PHY ID
 * and serdes type, LED mode, write protection, ASF/APE enablement and
 * Wake-on-LAN capability.  Defaults are applied first so a missing or
 * unsigned SRAM block still leaves the driver in a sane state.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 exposes its config through a VCPU shadow register rather
	 * than the NIC SRAM block used by the other chips.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust the SRAM config block if its signature is valid. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* The extended config words only exist on newer chips /
		 * bootcode versions; leave them 0 otherwise.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the packed SRAM encoding.
		 * Note this mirrors the bit layout used when reading
		 * MII_PHYSID1/2 in tg3_phy_probe().
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the SRAM setting. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards misreport write
			 * protection; treat them as writable.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards only support WOL when the bootcode says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Publish the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15336
/* Read one 32-bit word from the APE OTP region via the indirect OTP
 * address/control registers, holding the NVRAM arbitration lock for
 * the duration of the access.  Returns 0 on success, the error from
 * tg3_nvram_lock() if the lock cannot be taken, or -EBUSY when the
 * read command does not complete within the ~1 ms polling window.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;	/* scaled for the OTP address space -- TODO confirm units */

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Program the address, then issue a single read command. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);	/* read back to post the write */
	udelay(10);

	/* Poll for completion; *val is only written on success. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15369
15370 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15371 {
15372         int i;
15373         u32 val;
15374
15375         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15376         tw32(OTP_CTRL, cmd);
15377
15378         /* Wait for up to 1 ms for command to execute. */
15379         for (i = 0; i < 100; i++) {
15380                 val = tr32(OTP_STATUS);
15381                 if (val & OTP_STATUS_CMD_DONE)
15382                         break;
15383                 udelay(10);
15384         }
15385
15386         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15387 }
15388
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 on any OTP command failure (callers treat 0 as "no data").
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: word containing the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: adjacent word with the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15418
15419 static void tg3_phy_init_link_config(struct tg3 *tp)
15420 {
15421         u32 adv = ADVERTISED_Autoneg;
15422
15423         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15424                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15425                         adv |= ADVERTISED_1000baseT_Half;
15426                 adv |= ADVERTISED_1000baseT_Full;
15427         }
15428
15429         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15430                 adv |= ADVERTISED_100baseT_Half |
15431                        ADVERTISED_100baseT_Full |
15432                        ADVERTISED_10baseT_Half |
15433                        ADVERTISED_10baseT_Full |
15434                        ADVERTISED_TP;
15435         else
15436                 adv |= ADVERTISED_FIBRE;
15437
15438         tp->link_config.advertising = adv;
15439         tp->link_config.speed = SPEED_UNKNOWN;
15440         tp->link_config.duplex = DUPLEX_UNKNOWN;
15441         tp->link_config.autoneg = AUTONEG_ENABLE;
15442         tp->link_config.active_speed = SPEED_UNKNOWN;
15443         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15444
15445         tp->old_link = -1;
15446 }
15447
/* Discover and initialize the PHY.  The PHY ID is taken, in order of
 * preference, from the MII ID registers (skipped when ASF/APE firmware
 * owns the PHY), the EEPROM value cached by tg3_get_eeprom_hw_cfg(),
 * or the hard-coded subsystem-ID table.  Also sets up flow control,
 * EEE capability and the initial link configuration, and may reset
 * and autonegotiate the PHY.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates its PHY with the APE firmware
	 * through a dedicated lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without ASF, a copper gigabit PHY need not stay powered for
	 * management traffic, so drop the keep-alive power flags.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/2 into the driver's internal ID layout
		 * (same encoding as tg3_get_eeprom_hw_cfg() builds).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			/* Table phy_id of 0 marks a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper PHYs on these ASIC revisions support Energy Efficient
	 * Ethernet; enable it by default.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* When no firmware owns the PHY, reset it and kick off
	 * autonegotiation unless link is already up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here (long-standing behavior); the second
		 * call's status is what gets returned -- confirm against
		 * 5401 PHY errata before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15602
/* Parse the PCI VPD block: extract the board part number into
 * tp->board_part_number and, on Dell boards (MFR_ID "1028"), seed
 * tp->fw_ver from the VENDOR0 keyword.  When no usable VPD is found,
 * fall back to a name derived from the PCI device ID, or "none".
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell boards (manufacturer ID "1028") carry a firmware version
	 * string in the VENDOR0 keyword; copy it into fw_ver.  Any
	 * mismatch just skips ahead to the part-number lookup.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so the " bc " suffix still fits in fw_ver. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No part number in VPD: synthesize one from the PCI device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15726
15727 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15728 {
15729         u32 val;
15730
15731         if (tg3_nvram_read(tp, offset, &val) ||
15732             (val & 0xfc000000) != 0x0c000000 ||
15733             tg3_nvram_read(tp, offset + 4, &val) ||
15734             val != 0)
15735                 return 0;
15736
15737         return 1;
15738 }
15739
15740 static void tg3_read_bc_ver(struct tg3 *tp)
15741 {
15742         u32 val, offset, start, ver_offset;
15743         int i, dst_off;
15744         bool newver = false;
15745
15746         if (tg3_nvram_read(tp, 0xc, &offset) ||
15747             tg3_nvram_read(tp, 0x4, &start))
15748                 return;
15749
15750         offset = tg3_nvram_logical_addr(tp, offset);
15751
15752         if (tg3_nvram_read(tp, offset, &val))
15753                 return;
15754
15755         if ((val & 0xfc000000) == 0x0c000000) {
15756                 if (tg3_nvram_read(tp, offset + 4, &val))
15757                         return;
15758
15759                 if (val == 0)
15760                         newver = true;
15761         }
15762
15763         dst_off = strlen(tp->fw_ver);
15764
15765         if (newver) {
15766                 if (TG3_VER_SIZE - dst_off < 16 ||
15767                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15768                         return;
15769
15770                 offset = offset + ver_offset - start;
15771                 for (i = 0; i < 16; i += 4) {
15772                         __be32 v;
15773                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15774                                 return;
15775
15776                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15777                 }
15778         } else {
15779                 u32 major, minor;
15780
15781                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15782                         return;
15783
15784                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15785                         TG3_NVM_BCVER_MAJSFT;
15786                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15787                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15788                          "v%d.%02d", major, minor);
15789         }
15790 }
15791
15792 static void tg3_read_hwsb_ver(struct tg3 *tp)
15793 {
15794         u32 val, major, minor;
15795
15796         /* Use native endian representation */
15797         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15798                 return;
15799
15800         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15801                 TG3_NVM_HWSB_CFG1_MAJSFT;
15802         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15803                 TG3_NVM_HWSB_CFG1_MINSFT;
15804
15805         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15806 }
15807
/* Parse a self-boot ("sb") NVRAM image version and append it to
 * tp->fw_ver.  @val is the magic/format word already read from NVRAM
 * offset 0 by tg3_read_fw_ver().
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	/* Always tag the string so the user can tell it is a self-boot image */
	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition-history word lives at a revision-specific offset */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Reject values that cannot be rendered: minor is two digits,
	 * build maps to a single trailing letter 'a'..'z'.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* A non-zero build number is encoded as a trailing letter */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15862
15863 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15864 {
15865         u32 val, offset, start;
15866         int i, vlen;
15867
15868         for (offset = TG3_NVM_DIR_START;
15869              offset < TG3_NVM_DIR_END;
15870              offset += TG3_NVM_DIRENT_SIZE) {
15871                 if (tg3_nvram_read(tp, offset, &val))
15872                         return;
15873
15874                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15875                         break;
15876         }
15877
15878         if (offset == TG3_NVM_DIR_END)
15879                 return;
15880
15881         if (!tg3_flag(tp, 5705_PLUS))
15882                 start = 0x08000000;
15883         else if (tg3_nvram_read(tp, offset - 4, &start))
15884                 return;
15885
15886         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15887             !tg3_fw_img_is_valid(tp, offset) ||
15888             tg3_nvram_read(tp, offset + 8, &val))
15889                 return;
15890
15891         offset += val - start;
15892
15893         vlen = strlen(tp->fw_ver);
15894
15895         tp->fw_ver[vlen++] = ',';
15896         tp->fw_ver[vlen++] = ' ';
15897
15898         for (i = 0; i < 4; i++) {
15899                 __be32 v;
15900                 if (tg3_nvram_read_be32(tp, offset, &v))
15901                         return;
15902
15903                 offset += sizeof(v);
15904
15905                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15906                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15907                         break;
15908                 }
15909
15910                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15911                 vlen += sizeof(v);
15912         }
15913 }
15914
15915 static void tg3_probe_ncsi(struct tg3 *tp)
15916 {
15917         u32 apedata;
15918
15919         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15920         if (apedata != APE_SEG_SIG_MAGIC)
15921                 return;
15922
15923         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15924         if (!(apedata & APE_FW_STATUS_READY))
15925                 return;
15926
15927         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15928                 tg3_flag_set(tp, APE_HAS_NCSI);
15929 }
15930
15931 static void tg3_read_dash_ver(struct tg3 *tp)
15932 {
15933         int vlen;
15934         u32 apedata;
15935         char *fwtype;
15936
15937         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15938
15939         if (tg3_flag(tp, APE_HAS_NCSI))
15940                 fwtype = "NCSI";
15941         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15942                 fwtype = "SMASH";
15943         else
15944                 fwtype = "DASH";
15945
15946         vlen = strlen(tp->fw_ver);
15947
15948         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15949                  fwtype,
15950                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15951                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15952                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15953                  (apedata & APE_FW_VERSION_BLDMSK));
15954 }
15955
15956 static void tg3_read_otp_ver(struct tg3 *tp)
15957 {
15958         u32 val, val2;
15959
15960         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15961                 return;
15962
15963         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15964             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15965             TG3_OTP_MAGIC0_VALID(val)) {
15966                 u64 val64 = (u64) val << 32 | val2;
15967                 u32 ver = 0;
15968                 int i, vlen;
15969
15970                 for (i = 0; i < 7; i++) {
15971                         if ((val64 & 0xff) == 0)
15972                                 break;
15973                         ver = val64 & 0xff;
15974                         val64 >>= 8;
15975                 }
15976                 vlen = strlen(tp->fw_ver);
15977                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15978         }
15979 }
15980
15981 static void tg3_read_fw_ver(struct tg3 *tp)
15982 {
15983         u32 val;
15984         bool vpd_vers = false;
15985
15986         if (tp->fw_ver[0] != 0)
15987                 vpd_vers = true;
15988
15989         if (tg3_flag(tp, NO_NVRAM)) {
15990                 strcat(tp->fw_ver, "sb");
15991                 tg3_read_otp_ver(tp);
15992                 return;
15993         }
15994
15995         if (tg3_nvram_read(tp, 0, &val))
15996                 return;
15997
15998         if (val == TG3_EEPROM_MAGIC)
15999                 tg3_read_bc_ver(tp);
16000         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16001                 tg3_read_sb_ver(tp, val);
16002         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16003                 tg3_read_hwsb_ver(tp);
16004
16005         if (tg3_flag(tp, ENABLE_ASF)) {
16006                 if (tg3_flag(tp, ENABLE_APE)) {
16007                         tg3_probe_ncsi(tp);
16008                         if (!vpd_vers)
16009                                 tg3_read_dash_ver(tp);
16010                 } else if (!vpd_vers) {
16011                         tg3_read_mgmtfw_ver(tp);
16012                 }
16013         }
16014
16015         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16016 }
16017
16018 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16019 {
16020         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16021                 return TG3_RX_RET_MAX_SIZE_5717;
16022         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16023                 return TG3_RX_RET_MAX_SIZE_5700;
16024         else
16025                 return TG3_RX_RET_MAX_SIZE_5705;
16026 }
16027
/* Host bridges known to reorder posted writes to the chip's mailbox
 * registers; on these, the driver reads back after every mailbox write
 * to force ordering (see the comment ahead of the matching check in
 * tg3_get_invariants()).
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
16034
/* Find the sibling PCI function of a dual-port device.
 *
 * Scans all eight functions in tp->pdev's slot and returns the first
 * device that is not tp->pdev itself.  If none is found (e.g. a 5704
 * strapped into single-port mode), tp->pdev is returned.  The returned
 * pointer is deliberately NOT reference-counted -- see the comment at
 * the bottom.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference on empty slots or on tp->pdev itself
		 * (pci_dev_put(NULL) is a no-op) before trying the next
		 * function number.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
16062
/* Decode the chip revision ID from the MISC_HOST_CTRL value (or, for
 * newer devices, from a device-specific product-ID config register) and
 * derive the cumulative family flags (5705_PLUS, 5750_PLUS, 5755_PLUS,
 * 5780_CLASS, 57765_CLASS, 5717_PLUS, 57765_PLUS) that the rest of the
 * driver keys off.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick which product-ID register holds the real ASIC
		 * revision for this PCI device ID.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 reports the 5720 A0 revision ID; normalize it */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	/* The remaining flags are cumulative: each *_PLUS flag below may
	 * feed into the next check, so the order of these tests matters.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
16150
16151 static bool tg3_10_100_only_device(struct tg3 *tp,
16152                                    const struct pci_device_id *ent)
16153 {
16154         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16155
16156         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16157              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16158             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16159                 return true;
16160
16161         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16162                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16163                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16164                                 return true;
16165                 } else {
16166                         return true;
16167                 }
16168         }
16169
16170         return false;
16171 }
16172
16173 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16174 {
16175         u32 misc_ctrl_reg;
16176         u32 pci_state_reg, grc_misc_cfg;
16177         u32 val;
16178         u16 pci_cmd;
16179         int err;
16180
16181         /* Force memory write invalidate off.  If we leave it on,
16182          * then on 5700_BX chips we have to enable a workaround.
16183          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16184          * to match the cacheline size.  The Broadcom driver have this
16185          * workaround but turns MWI off all the times so never uses
16186          * it.  This seems to suggest that the workaround is insufficient.
16187          */
16188         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16189         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16190         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16191
16192         /* Important! -- Make sure register accesses are byteswapped
16193          * correctly.  Also, for those chips that require it, make
16194          * sure that indirect register accesses are enabled before
16195          * the first operation.
16196          */
16197         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16198                               &misc_ctrl_reg);
16199         tp->misc_host_ctrl |= (misc_ctrl_reg &
16200                                MISC_HOST_CTRL_CHIPREV);
16201         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16202                                tp->misc_host_ctrl);
16203
16204         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16205
16206         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16207          * we need to disable memory and use config. cycles
16208          * only to access all registers. The 5702/03 chips
16209          * can mistakenly decode the special cycles from the
16210          * ICH chipsets as memory write cycles, causing corruption
16211          * of register and memory space. Only certain ICH bridges
16212          * will drive special cycles with non-zero data during the
16213          * address phase which can fall within the 5703's address
16214          * range. This is not an ICH bug as the PCI spec allows
16215          * non-zero address during special cycles. However, only
16216          * these ICH bridges are known to drive non-zero addresses
16217          * during special cycles.
16218          *
16219          * Since special cycles do not cross PCI bridges, we only
16220          * enable this workaround if the 5703 is on the secondary
16221          * bus of these ICH bridges.
16222          */
16223         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16224             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16225                 static struct tg3_dev_id {
16226                         u32     vendor;
16227                         u32     device;
16228                         u32     rev;
16229                 } ich_chipsets[] = {
16230                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16231                           PCI_ANY_ID },
16232                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16233                           PCI_ANY_ID },
16234                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16235                           0xa },
16236                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16237                           PCI_ANY_ID },
16238                         { },
16239                 };
16240                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16241                 struct pci_dev *bridge = NULL;
16242
16243                 while (pci_id->vendor != 0) {
16244                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16245                                                 bridge);
16246                         if (!bridge) {
16247                                 pci_id++;
16248                                 continue;
16249                         }
16250                         if (pci_id->rev != PCI_ANY_ID) {
16251                                 if (bridge->revision > pci_id->rev)
16252                                         continue;
16253                         }
16254                         if (bridge->subordinate &&
16255                             (bridge->subordinate->number ==
16256                              tp->pdev->bus->number)) {
16257                                 tg3_flag_set(tp, ICH_WORKAROUND);
16258                                 pci_dev_put(bridge);
16259                                 break;
16260                         }
16261                 }
16262         }
16263
16264         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16265                 static struct tg3_dev_id {
16266                         u32     vendor;
16267                         u32     device;
16268                 } bridge_chipsets[] = {
16269                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16270                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16271                         { },
16272                 };
16273                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16274                 struct pci_dev *bridge = NULL;
16275
16276                 while (pci_id->vendor != 0) {
16277                         bridge = pci_get_device(pci_id->vendor,
16278                                                 pci_id->device,
16279                                                 bridge);
16280                         if (!bridge) {
16281                                 pci_id++;
16282                                 continue;
16283                         }
16284                         if (bridge->subordinate &&
16285                             (bridge->subordinate->number <=
16286                              tp->pdev->bus->number) &&
16287                             (bridge->subordinate->busn_res.end >=
16288                              tp->pdev->bus->number)) {
16289                                 tg3_flag_set(tp, 5701_DMA_BUG);
16290                                 pci_dev_put(bridge);
16291                                 break;
16292                         }
16293                 }
16294         }
16295
16296         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16297          * DMA addresses > 40-bit. This bridge may have other additional
16298          * 57xx devices behind it in some 4-port NIC designs for example.
16299          * Any tg3 device found behind the bridge will also need the 40-bit
16300          * DMA workaround.
16301          */
16302         if (tg3_flag(tp, 5780_CLASS)) {
16303                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16304                 tp->msi_cap = tp->pdev->msi_cap;
16305         } else {
16306                 struct pci_dev *bridge = NULL;
16307
16308                 do {
16309                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16310                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16311                                                 bridge);
16312                         if (bridge && bridge->subordinate &&
16313                             (bridge->subordinate->number <=
16314                              tp->pdev->bus->number) &&
16315                             (bridge->subordinate->busn_res.end >=
16316                              tp->pdev->bus->number)) {
16317                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16318                                 pci_dev_put(bridge);
16319                                 break;
16320                         }
16321                 } while (bridge);
16322         }
16323
16324         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16325             tg3_asic_rev(tp) == ASIC_REV_5714)
16326                 tp->pdev_peer = tg3_find_peer(tp);
16327
16328         /* Determine TSO capabilities */
16329         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16330                 ; /* Do nothing. HW bug. */
16331         else if (tg3_flag(tp, 57765_PLUS))
16332                 tg3_flag_set(tp, HW_TSO_3);
16333         else if (tg3_flag(tp, 5755_PLUS) ||
16334                  tg3_asic_rev(tp) == ASIC_REV_5906)
16335                 tg3_flag_set(tp, HW_TSO_2);
16336         else if (tg3_flag(tp, 5750_PLUS)) {
16337                 tg3_flag_set(tp, HW_TSO_1);
16338                 tg3_flag_set(tp, TSO_BUG);
16339                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16340                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16341                         tg3_flag_clear(tp, TSO_BUG);
16342         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16343                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16344                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16345                 tg3_flag_set(tp, FW_TSO);
16346                 tg3_flag_set(tp, TSO_BUG);
16347                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16348                         tp->fw_needed = FIRMWARE_TG3TSO5;
16349                 else
16350                         tp->fw_needed = FIRMWARE_TG3TSO;
16351         }
16352
16353         /* Selectively allow TSO based on operating conditions */
16354         if (tg3_flag(tp, HW_TSO_1) ||
16355             tg3_flag(tp, HW_TSO_2) ||
16356             tg3_flag(tp, HW_TSO_3) ||
16357             tg3_flag(tp, FW_TSO)) {
16358                 /* For firmware TSO, assume ASF is disabled.
16359                  * We'll disable TSO later if we discover ASF
16360                  * is enabled in tg3_get_eeprom_hw_cfg().
16361                  */
16362                 tg3_flag_set(tp, TSO_CAPABLE);
16363         } else {
16364                 tg3_flag_clear(tp, TSO_CAPABLE);
16365                 tg3_flag_clear(tp, TSO_BUG);
16366                 tp->fw_needed = NULL;
16367         }
16368
16369         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16370                 tp->fw_needed = FIRMWARE_TG3;
16371
16372         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16373                 tp->fw_needed = FIRMWARE_TG357766;
16374
16375         tp->irq_max = 1;
16376
16377         if (tg3_flag(tp, 5750_PLUS)) {
16378                 tg3_flag_set(tp, SUPPORT_MSI);
16379                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16380                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16381                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16382                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16383                      tp->pdev_peer == tp->pdev))
16384                         tg3_flag_clear(tp, SUPPORT_MSI);
16385
16386                 if (tg3_flag(tp, 5755_PLUS) ||
16387                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16388                         tg3_flag_set(tp, 1SHOT_MSI);
16389                 }
16390
16391                 if (tg3_flag(tp, 57765_PLUS)) {
16392                         tg3_flag_set(tp, SUPPORT_MSIX);
16393                         tp->irq_max = TG3_IRQ_MAX_VECS;
16394                 }
16395         }
16396
16397         tp->txq_max = 1;
16398         tp->rxq_max = 1;
16399         if (tp->irq_max > 1) {
16400                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16401                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16402
16403                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16404                     tg3_asic_rev(tp) == ASIC_REV_5720)
16405                         tp->txq_max = tp->irq_max - 1;
16406         }
16407
16408         if (tg3_flag(tp, 5755_PLUS) ||
16409             tg3_asic_rev(tp) == ASIC_REV_5906)
16410                 tg3_flag_set(tp, SHORT_DMA_BUG);
16411
16412         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16413                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16414
16415         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16416             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16417             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16418             tg3_asic_rev(tp) == ASIC_REV_5762)
16419                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16420
16421         if (tg3_flag(tp, 57765_PLUS) &&
16422             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16423                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16424
16425         if (!tg3_flag(tp, 5705_PLUS) ||
16426             tg3_flag(tp, 5780_CLASS) ||
16427             tg3_flag(tp, USE_JUMBO_BDFLAG))
16428                 tg3_flag_set(tp, JUMBO_CAPABLE);
16429
16430         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16431                               &pci_state_reg);
16432
16433         if (pci_is_pcie(tp->pdev)) {
16434                 u16 lnkctl;
16435
16436                 tg3_flag_set(tp, PCI_EXPRESS);
16437
16438                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16439                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16440                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16441                                 tg3_flag_clear(tp, HW_TSO_2);
16442                                 tg3_flag_clear(tp, TSO_CAPABLE);
16443                         }
16444                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16445                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16446                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16447                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16448                                 tg3_flag_set(tp, CLKREQ_BUG);
16449                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16450                         tg3_flag_set(tp, L1PLLPD_EN);
16451                 }
16452         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16453                 /* BCM5785 devices are effectively PCIe devices, and should
16454                  * follow PCIe codepaths, but do not have a PCIe capabilities
16455                  * section.
16456                  */
16457                 tg3_flag_set(tp, PCI_EXPRESS);
16458         } else if (!tg3_flag(tp, 5705_PLUS) ||
16459                    tg3_flag(tp, 5780_CLASS)) {
16460                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16461                 if (!tp->pcix_cap) {
16462                         dev_err(&tp->pdev->dev,
16463                                 "Cannot find PCI-X capability, aborting\n");
16464                         return -EIO;
16465                 }
16466
16467                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16468                         tg3_flag_set(tp, PCIX_MODE);
16469         }
16470
16471         /* If we have an AMD 762 or VIA K8T800 chipset, write
16472          * reordering to the mailbox registers done by the host
16473          * controller can cause major troubles.  We read back from
16474          * every mailbox register write to force the writes to be
16475          * posted to the chip in order.
16476          */
16477         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16478             !tg3_flag(tp, PCI_EXPRESS))
16479                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16480
16481         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16482                              &tp->pci_cacheline_sz);
16483         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16484                              &tp->pci_lat_timer);
16485         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16486             tp->pci_lat_timer < 64) {
16487                 tp->pci_lat_timer = 64;
16488                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16489                                       tp->pci_lat_timer);
16490         }
16491
16492         /* Important! -- It is critical that the PCI-X hw workaround
16493          * situation is decided before the first MMIO register access.
16494          */
16495         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16496                 /* 5700 BX chips need to have their TX producer index
16497                  * mailboxes written twice to workaround a bug.
16498                  */
16499                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16500
16501                 /* If we are in PCI-X mode, enable register write workaround.
16502                  *
16503                  * The workaround is to use indirect register accesses
16504                  * for all chip writes not to mailbox registers.
16505                  */
16506                 if (tg3_flag(tp, PCIX_MODE)) {
16507                         u32 pm_reg;
16508
16509                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16510
16511                         /* The chip can have it's power management PCI config
16512                          * space registers clobbered due to this bug.
16513                          * So explicitly force the chip into D0 here.
16514                          */
16515                         pci_read_config_dword(tp->pdev,
16516                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16517                                               &pm_reg);
16518                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16519                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16520                         pci_write_config_dword(tp->pdev,
16521                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16522                                                pm_reg);
16523
16524                         /* Also, force SERR#/PERR# in PCI command. */
16525                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16526                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16527                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16528                 }
16529         }
16530
16531         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16532                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16533         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16534                 tg3_flag_set(tp, PCI_32BIT);
16535
16536         /* Chip-specific fixup from Broadcom driver */
16537         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16538             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16539                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16540                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16541         }
16542
16543         /* Default fast path register access methods */
16544         tp->read32 = tg3_read32;
16545         tp->write32 = tg3_write32;
16546         tp->read32_mbox = tg3_read32;
16547         tp->write32_mbox = tg3_write32;
16548         tp->write32_tx_mbox = tg3_write32;
16549         tp->write32_rx_mbox = tg3_write32;
16550
16551         /* Various workaround register access methods */
16552         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16553                 tp->write32 = tg3_write_indirect_reg32;
16554         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16555                  (tg3_flag(tp, PCI_EXPRESS) &&
16556                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16557                 /*
16558                  * Back to back register writes can cause problems on these
16559                  * chips, the workaround is to read back all reg writes
16560                  * except those to mailbox regs.
16561                  *
16562                  * See tg3_write_indirect_reg32().
16563                  */
16564                 tp->write32 = tg3_write_flush_reg32;
16565         }
16566
16567         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16568                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16569                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16570                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16571         }
16572
16573         if (tg3_flag(tp, ICH_WORKAROUND)) {
16574                 tp->read32 = tg3_read_indirect_reg32;
16575                 tp->write32 = tg3_write_indirect_reg32;
16576                 tp->read32_mbox = tg3_read_indirect_mbox;
16577                 tp->write32_mbox = tg3_write_indirect_mbox;
16578                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16579                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16580
16581                 iounmap(tp->regs);
16582                 tp->regs = NULL;
16583
16584                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16585                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16586                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16587         }
16588         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16589                 tp->read32_mbox = tg3_read32_mbox_5906;
16590                 tp->write32_mbox = tg3_write32_mbox_5906;
16591                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16592                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16593         }
16594
16595         if (tp->write32 == tg3_write_indirect_reg32 ||
16596             (tg3_flag(tp, PCIX_MODE) &&
16597              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16598               tg3_asic_rev(tp) == ASIC_REV_5701)))
16599                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16600
16601         /* The memory arbiter has to be enabled in order for SRAM accesses
16602          * to succeed.  Normally on powerup the tg3 chip firmware will make
16603          * sure it is enabled, but other entities such as system netboot
16604          * code might disable it.
16605          */
16606         val = tr32(MEMARB_MODE);
16607         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16608
16609         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16610         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16611             tg3_flag(tp, 5780_CLASS)) {
16612                 if (tg3_flag(tp, PCIX_MODE)) {
16613                         pci_read_config_dword(tp->pdev,
16614                                               tp->pcix_cap + PCI_X_STATUS,
16615                                               &val);
16616                         tp->pci_fn = val & 0x7;
16617                 }
16618         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16619                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16620                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16621                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16622                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16623                         val = tr32(TG3_CPMU_STATUS);
16624
16625                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16626                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16627                 else
16628                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16629                                      TG3_CPMU_STATUS_FSHFT_5719;
16630         }
16631
16632         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16633                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16634                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16635         }
16636
16637         /* Get eeprom hw config before calling tg3_set_power_state().
16638          * In particular, the TG3_FLAG_IS_NIC flag must be
16639          * determined before calling tg3_set_power_state() so that
16640          * we know whether or not to switch out of Vaux power.
16641          * When the flag is set, it means that GPIO1 is used for eeprom
16642          * write protect and also implies that it is a LOM where GPIOs
16643          * are not used to switch power.
16644          */
16645         tg3_get_eeprom_hw_cfg(tp);
16646
16647         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16648                 tg3_flag_clear(tp, TSO_CAPABLE);
16649                 tg3_flag_clear(tp, TSO_BUG);
16650                 tp->fw_needed = NULL;
16651         }
16652
16653         if (tg3_flag(tp, ENABLE_APE)) {
16654                 /* Allow reads and writes to the
16655                  * APE register and memory space.
16656                  */
16657                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16658                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16659                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16660                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16661                                        pci_state_reg);
16662
16663                 tg3_ape_lock_init(tp);
16664         }
16665
16666         /* Set up tp->grc_local_ctrl before calling
16667          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16668          * will bring 5700's external PHY out of reset.
16669          * It is also used as eeprom write protect on LOMs.
16670          */
16671         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16672         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16673             tg3_flag(tp, EEPROM_WRITE_PROT))
16674                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16675                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16676         /* Unused GPIO3 must be driven as output on 5752 because there
16677          * are no pull-up resistors on unused GPIO pins.
16678          */
16679         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16680                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16681
16682         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16683             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16684             tg3_flag(tp, 57765_CLASS))
16685                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16686
16687         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16688             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16689                 /* Turn off the debug UART. */
16690                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16691                 if (tg3_flag(tp, IS_NIC))
16692                         /* Keep VMain power. */
16693                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16694                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16695         }
16696
16697         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16698                 tp->grc_local_ctrl |=
16699                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16700
16701         /* Switch out of Vaux if it is a NIC */
16702         tg3_pwrsrc_switch_to_vmain(tp);
16703
16704         /* Derive initial jumbo mode from MTU assigned in
16705          * ether_setup() via the alloc_etherdev() call
16706          */
16707         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16708                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16709
16710         /* Determine WakeOnLan speed to use. */
16711         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16712             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16713             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16714             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16715                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16716         } else {
16717                 tg3_flag_set(tp, WOL_SPEED_100MB);
16718         }
16719
16720         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16721                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16722
16723         /* A few boards don't want Ethernet@WireSpeed phy feature */
16724         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16725             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16726              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16727              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16728             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16729             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16730                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16731
16732         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16733             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16734                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16735         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16736                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16737
16738         if (tg3_flag(tp, 5705_PLUS) &&
16739             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16740             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16741             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16742             !tg3_flag(tp, 57765_PLUS)) {
16743                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16744                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16745                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16746                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16747                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16748                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16749                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16750                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16751                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16752                 } else
16753                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16754         }
16755
16756         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16757             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16758                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16759                 if (tp->phy_otp == 0)
16760                         tp->phy_otp = TG3_OTP_DEFAULT;
16761         }
16762
16763         if (tg3_flag(tp, CPMU_PRESENT))
16764                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16765         else
16766                 tp->mi_mode = MAC_MI_MODE_BASE;
16767
16768         tp->coalesce_mode = 0;
16769         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16770             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16771                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16772
16773         /* Set these bits to enable statistics workaround. */
16774         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16775             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16776             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16777             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16778                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16779                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16780         }
16781
16782         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16783             tg3_asic_rev(tp) == ASIC_REV_57780)
16784                 tg3_flag_set(tp, USE_PHYLIB);
16785
16786         err = tg3_mdio_init(tp);
16787         if (err)
16788                 return err;
16789
16790         /* Initialize data/descriptor byte/word swapping. */
16791         val = tr32(GRC_MODE);
16792         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16793             tg3_asic_rev(tp) == ASIC_REV_5762)
16794                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16795                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16796                         GRC_MODE_B2HRX_ENABLE |
16797                         GRC_MODE_HTX2B_ENABLE |
16798                         GRC_MODE_HOST_STACKUP);
16799         else
16800                 val &= GRC_MODE_HOST_STACKUP;
16801
16802         tw32(GRC_MODE, val | tp->grc_mode);
16803
16804         tg3_switch_clocks(tp);
16805
16806         /* Clear this out for sanity. */
16807         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16808
16809         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16810         tw32(TG3PCI_REG_BASE_ADDR, 0);
16811
16812         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16813                               &pci_state_reg);
16814         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16815             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16816                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16817                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16818                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16819                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16820                         void __iomem *sram_base;
16821
16822                         /* Write some dummy words into the SRAM status block
16823                          * area, see if it reads back correctly.  If the return
16824                          * value is bad, force enable the PCIX workaround.
16825                          */
16826                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16827
16828                         writel(0x00000000, sram_base);
16829                         writel(0x00000000, sram_base + 4);
16830                         writel(0xffffffff, sram_base + 4);
16831                         if (readl(sram_base) != 0x00000000)
16832                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16833                 }
16834         }
16835
16836         udelay(50);
16837         tg3_nvram_init(tp);
16838
16839         /* If the device has an NVRAM, no need to load patch firmware */
16840         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16841             !tg3_flag(tp, NO_NVRAM))
16842                 tp->fw_needed = NULL;
16843
16844         grc_misc_cfg = tr32(GRC_MISC_CFG);
16845         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16846
16847         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16848             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16849              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16850                 tg3_flag_set(tp, IS_5788);
16851
16852         if (!tg3_flag(tp, IS_5788) &&
16853             tg3_asic_rev(tp) != ASIC_REV_5700)
16854                 tg3_flag_set(tp, TAGGED_STATUS);
16855         if (tg3_flag(tp, TAGGED_STATUS)) {
16856                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16857                                       HOSTCC_MODE_CLRTICK_TXBD);
16858
16859                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16860                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16861                                        tp->misc_host_ctrl);
16862         }
16863
16864         /* Preserve the APE MAC_MODE bits */
16865         if (tg3_flag(tp, ENABLE_APE))
16866                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16867         else
16868                 tp->mac_mode = 0;
16869
16870         if (tg3_10_100_only_device(tp, ent))
16871                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16872
16873         err = tg3_phy_probe(tp);
16874         if (err) {
16875                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16876                 /* ... but do not return immediately ... */
16877                 tg3_mdio_fini(tp);
16878         }
16879
16880         tg3_read_vpd(tp);
16881         tg3_read_fw_ver(tp);
16882
16883         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16884                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16885         } else {
16886                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16887                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16888                 else
16889                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16890         }
16891
16892         /* 5700 {AX,BX} chips have a broken status block link
16893          * change bit implementation, so we must use the
16894          * status register in those cases.
16895          */
16896         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16897                 tg3_flag_set(tp, USE_LINKCHG_REG);
16898         else
16899                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16900
16901         /* The led_ctrl is set during tg3_phy_probe, here we might
16902          * have to force the link status polling mechanism based
16903          * upon subsystem IDs.
16904          */
16905         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16906             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16907             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16908                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16909                 tg3_flag_set(tp, USE_LINKCHG_REG);
16910         }
16911
16912         /* For all SERDES we poll the MAC status register. */
16913         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16914                 tg3_flag_set(tp, POLL_SERDES);
16915         else
16916                 tg3_flag_clear(tp, POLL_SERDES);
16917
16918         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16919                 tg3_flag_set(tp, POLL_CPMU_LINK);
16920
16921         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16922         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16923         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16924             tg3_flag(tp, PCIX_MODE)) {
16925                 tp->rx_offset = NET_SKB_PAD;
16926 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16927                 tp->rx_copy_thresh = ~(u16)0;
16928 #endif
16929         }
16930
16931         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16932         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16933         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16934
16935         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16936
16937         /* Increment the rx prod index on the rx std ring by at most
16938          * 8 for these chips to workaround hw errata.
16939          */
16940         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16941             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16942             tg3_asic_rev(tp) == ASIC_REV_5755)
16943                 tp->rx_std_max_post = 8;
16944
16945         if (tg3_flag(tp, ASPM_WORKAROUND))
16946                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16947                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16948
16949         return err;
16950 }
16951
16952 #ifdef CONFIG_SPARC
16953 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16954 {
16955         struct net_device *dev = tp->dev;
16956         struct pci_dev *pdev = tp->pdev;
16957         struct device_node *dp = pci_device_to_OF_node(pdev);
16958         const unsigned char *addr;
16959         int len;
16960
16961         addr = of_get_property(dp, "local-mac-address", &len);
16962         if (addr && len == ETH_ALEN) {
16963                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16964                 return 0;
16965         }
16966         return -ENODEV;
16967 }
16968
16969 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16970 {
16971         struct net_device *dev = tp->dev;
16972
16973         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16974         return 0;
16975 }
16976 #endif
16977
/* Determine the device's MAC address and store it in tp->dev->dev_addr.
 *
 * Sources are tried in order until one yields a valid address:
 *   1. OpenFirmware "local-mac-address" property (SPARC only)
 *   2. SSB GigE core (tg3 core embedded in an SSB SoC)
 *   3. MAC address mailbox in NIC SRAM (written by bootcode)
 *   4. NVRAM at a chip-specific offset
 *   5. the live MAC_ADDR_0 hardware registers
 *
 * Returns 0 on success, -EINVAL if no valid address could be found
 * (on SPARC the IDPROM address is used as a last resort first).
 */
static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;
        int err;

#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        if (tg3_flag(tp, IS_SSB_CORE)) {
                err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
                if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
                        return 0;
        }

        /* Pick the NVRAM offset of the MAC address for this chip/function.
         * 0x7c is the default; dual-MAC and multi-function parts store the
         * second function's address at 0xcc (plus 0x18c per extra function
         * on 5717+), and the 5906 keeps it at 0x10.
         */
        mac_offset = 0x7c;
        if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* NOTE(review): if the NVRAM arbitration lock cannot be
                 * taken, reset the NVRAM state machine instead of waiting;
                 * otherwise drop the lock we just acquired — confirm intent.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* 0x484b in the upper half word is presumably the bootcode's
         * "address valid" signature (ASCII "HK"); the two low bytes of
         * hi and all four bytes of lo hold the address itself.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        /* NVRAM words are big-endian: the address starts
                         * two bytes into hi and continues through lo.
                         */
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        return 0;
}
17059
17060 #define BOUNDARY_SINGLE_CACHELINE       1
17061 #define BOUNDARY_MULTI_CACHELINE        2
17062
17063 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17064 {
17065         int cacheline_size;
17066         u8 byte;
17067         int goal;
17068
17069         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17070         if (byte == 0)
17071                 cacheline_size = 1024;
17072         else
17073                 cacheline_size = (int) byte * 4;
17074
17075         /* On 5703 and later chips, the boundary bits have no
17076          * effect.
17077          */
17078         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17079             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17080             !tg3_flag(tp, PCI_EXPRESS))
17081                 goto out;
17082
17083 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17084         goal = BOUNDARY_MULTI_CACHELINE;
17085 #else
17086 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17087         goal = BOUNDARY_SINGLE_CACHELINE;
17088 #else
17089         goal = 0;
17090 #endif
17091 #endif
17092
17093         if (tg3_flag(tp, 57765_PLUS)) {
17094                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17095                 goto out;
17096         }
17097
17098         if (!goal)
17099                 goto out;
17100
17101         /* PCI controllers on most RISC systems tend to disconnect
17102          * when a device tries to burst across a cache-line boundary.
17103          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17104          *
17105          * Unfortunately, for PCI-E there are only limited
17106          * write-side controls for this, and thus for reads
17107          * we will still get the disconnects.  We'll also waste
17108          * these PCI cycles for both read and write for chips
17109          * other than 5700 and 5701 which do not implement the
17110          * boundary bits.
17111          */
17112         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17113                 switch (cacheline_size) {
17114                 case 16:
17115                 case 32:
17116                 case 64:
17117                 case 128:
17118                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17119                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17120                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17121                         } else {
17122                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17123                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17124                         }
17125                         break;
17126
17127                 case 256:
17128                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17129                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17130                         break;
17131
17132                 default:
17133                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17134                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17135                         break;
17136                 }
17137         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17138                 switch (cacheline_size) {
17139                 case 16:
17140                 case 32:
17141                 case 64:
17142                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17143                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17144                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17145                                 break;
17146                         }
17147                         /* fallthrough */
17148                 case 128:
17149                 default:
17150                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17151                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17152                         break;
17153                 }
17154         } else {
17155                 switch (cacheline_size) {
17156                 case 16:
17157                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17158                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17159                                         DMA_RWCTRL_WRITE_BNDRY_16);
17160                                 break;
17161                         }
17162                         /* fallthrough */
17163                 case 32:
17164                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17165                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17166                                         DMA_RWCTRL_WRITE_BNDRY_32);
17167                                 break;
17168                         }
17169                         /* fallthrough */
17170                 case 64:
17171                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17172                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17173                                         DMA_RWCTRL_WRITE_BNDRY_64);
17174                                 break;
17175                         }
17176                         /* fallthrough */
17177                 case 128:
17178                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17179                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17180                                         DMA_RWCTRL_WRITE_BNDRY_128);
17181                                 break;
17182                         }
17183                         /* fallthrough */
17184                 case 256:
17185                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17186                                 DMA_RWCTRL_WRITE_BNDRY_256);
17187                         break;
17188                 case 512:
17189                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17190                                 DMA_RWCTRL_WRITE_BNDRY_512);
17191                         break;
17192                 case 1024:
17193                 default:
17194                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17195                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17196                         break;
17197                 }
17198         }
17199
17200 out:
17201         return val;
17202 }
17203
/* Perform one raw DMA transaction of @size bytes between the host buffer
 * at @buf_dma and NIC-internal mbuf memory, using a hand-built internal
 * buffer descriptor placed in NIC SRAM.
 *
 * @tp:        device instance
 * @buf:       kernel virtual address of the test buffer (unused here, the
 *             chip DMAs to/from @buf_dma directly)
 * @buf_dma:   bus address of the test buffer
 * @size:      number of bytes to transfer
 * @to_device: true  = host memory -> NIC (read DMA engine),
 *             false = NIC -> host memory (write DMA engine)
 *
 * Returns 0 if the completion FIFO reported the descriptor within the
 * polling window (40 * 100us), -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
                           int size, bool to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the DMA engines: drain the completion FIFOs and clear
         * any stale read/write DMA status before starting the test.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal descriptor: split 64-bit host address, and
         * point it at a fixed NIC mbuf location.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Host -> NIC uses the read DMA engine. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* NIC -> host uses the write DMA engine. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * PCI memory-window registers in config space.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick the transfer by enqueueing the descriptor address on the
         * appropriate flow-through queue.
         */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll the matching completion FIFO for our descriptor address;
         * give the chip up to ~4ms before declaring failure.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
17284
/* Size of the coherent buffer used for the loopback DMA test below. */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * they pass tg3_test_dma(); force the conservative 16-byte write
 * boundary on these (see tg3_test_dma()).
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};
17291
/* Tune tp->dma_rwctrl for the host bus (PCI / PCI-X / PCIe) and, on
 * 5700/5701, run an actual write-then-read DMA loopback test to detect
 * the write DMA bug, tightening the write boundary to 16 bytes if the
 * readback is corrupted.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or a negative error from the DMA test itself.
 */
static int tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Base PCI command codes, then let tg3_calc_dma_bndry() fold in
         * the cacheline-boundary settings for this platform.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        /* Per-bus-type watermark tuning. */
        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
                    tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
                    tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
        if (tg3_flag(tp, ONE_DMA_AT_ONCE))
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

        /* 5703/5704 reuse the low bits for other purposes; clear the
         * boundary bits computed above.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);


        /* Only 5700/5701 need the actual loopback DMA test. */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* Corruption: retry once with the 16-byte write
                         * boundary workaround; if already applied, the
                         * hardware is genuinely broken.
                         */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
17465
17466 static void tg3_init_bufmgr_config(struct tg3 *tp)
17467 {
17468         if (tg3_flag(tp, 57765_PLUS)) {
17469                 tp->bufmgr_config.mbuf_read_dma_low_water =
17470                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17471                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17472                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17473                 tp->bufmgr_config.mbuf_high_water =
17474                         DEFAULT_MB_HIGH_WATER_57765;
17475
17476                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17477                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17478                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17479                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17480                 tp->bufmgr_config.mbuf_high_water_jumbo =
17481                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17482         } else if (tg3_flag(tp, 5705_PLUS)) {
17483                 tp->bufmgr_config.mbuf_read_dma_low_water =
17484                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17485                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17486                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17487                 tp->bufmgr_config.mbuf_high_water =
17488                         DEFAULT_MB_HIGH_WATER_5705;
17489                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17490                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17491                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17492                         tp->bufmgr_config.mbuf_high_water =
17493                                 DEFAULT_MB_HIGH_WATER_5906;
17494                 }
17495
17496                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17497                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17498                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17499                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17500                 tp->bufmgr_config.mbuf_high_water_jumbo =
17501                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17502         } else {
17503                 tp->bufmgr_config.mbuf_read_dma_low_water =
17504                         DEFAULT_MB_RDMA_LOW_WATER;
17505                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17506                         DEFAULT_MB_MACRX_LOW_WATER;
17507                 tp->bufmgr_config.mbuf_high_water =
17508                         DEFAULT_MB_HIGH_WATER;
17509
17510                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17511                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17512                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17513                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17514                 tp->bufmgr_config.mbuf_high_water_jumbo =
17515                         DEFAULT_MB_HIGH_WATER_JUMBO;
17516         }
17517
17518         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17519         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17520 }
17521
17522 static char *tg3_phy_string(struct tg3 *tp)
17523 {
17524         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17525         case TG3_PHY_ID_BCM5400:        return "5400";
17526         case TG3_PHY_ID_BCM5401:        return "5401";
17527         case TG3_PHY_ID_BCM5411:        return "5411";
17528         case TG3_PHY_ID_BCM5701:        return "5701";
17529         case TG3_PHY_ID_BCM5703:        return "5703";
17530         case TG3_PHY_ID_BCM5704:        return "5704";
17531         case TG3_PHY_ID_BCM5705:        return "5705";
17532         case TG3_PHY_ID_BCM5750:        return "5750";
17533         case TG3_PHY_ID_BCM5752:        return "5752";
17534         case TG3_PHY_ID_BCM5714:        return "5714";
17535         case TG3_PHY_ID_BCM5780:        return "5780";
17536         case TG3_PHY_ID_BCM5755:        return "5755";
17537         case TG3_PHY_ID_BCM5787:        return "5787";
17538         case TG3_PHY_ID_BCM5784:        return "5784";
17539         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17540         case TG3_PHY_ID_BCM5906:        return "5906";
17541         case TG3_PHY_ID_BCM5761:        return "5761";
17542         case TG3_PHY_ID_BCM5718C:       return "5718C";
17543         case TG3_PHY_ID_BCM5718S:       return "5718S";
17544         case TG3_PHY_ID_BCM57765:       return "57765";
17545         case TG3_PHY_ID_BCM5719C:       return "5719C";
17546         case TG3_PHY_ID_BCM5720C:       return "5720C";
17547         case TG3_PHY_ID_BCM5762:        return "5762C";
17548         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17549         case 0:                 return "serdes";
17550         default:                return "unknown";
17551         }
17552 }
17553
17554 static char *tg3_bus_string(struct tg3 *tp, char *str)
17555 {
17556         if (tg3_flag(tp, PCI_EXPRESS)) {
17557                 strcpy(str, "PCI Express");
17558                 return str;
17559         } else if (tg3_flag(tp, PCIX_MODE)) {
17560                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17561
17562                 strcpy(str, "PCIX:");
17563
17564                 if ((clock_ctrl == 7) ||
17565                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17566                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17567                         strcat(str, "133MHz");
17568                 else if (clock_ctrl == 0)
17569                         strcat(str, "33MHz");
17570                 else if (clock_ctrl == 2)
17571                         strcat(str, "50MHz");
17572                 else if (clock_ctrl == 4)
17573                         strcat(str, "66MHz");
17574                 else if (clock_ctrl == 6)
17575                         strcat(str, "100MHz");
17576         } else {
17577                 strcpy(str, "PCI:");
17578                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17579                         strcat(str, "66MHz");
17580                 else
17581                         strcat(str, "33MHz");
17582         }
17583         if (tg3_flag(tp, PCI_32BIT))
17584                 strcat(str, ":32-bit");
17585         else
17586                 strcat(str, ":64-bit");
17587         return str;
17588 }
17589
17590 static void tg3_init_coal(struct tg3 *tp)
17591 {
17592         struct ethtool_coalesce *ec = &tp->coal;
17593
17594         memset(ec, 0, sizeof(*ec));
17595         ec->cmd = ETHTOOL_GCOALESCE;
17596         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17597         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17598         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17599         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17600         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17601         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17602         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17603         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17604         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17605
17606         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17607                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17608                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17609                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17610                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17611                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17612         }
17613
17614         if (tg3_flag(tp, 5705_PLUS)) {
17615                 ec->rx_coalesce_usecs_irq = 0;
17616                 ec->tx_coalesce_usecs_irq = 0;
17617                 ec->stats_block_coalesce_usecs = 0;
17618         }
17619 }
17620
17621 static int tg3_init_one(struct pci_dev *pdev,
17622                                   const struct pci_device_id *ent)
17623 {
17624         struct net_device *dev;
17625         struct tg3 *tp;
17626         int i, err;
17627         u32 sndmbx, rcvmbx, intmbx;
17628         char str[40];
17629         u64 dma_mask, persist_dma_mask;
17630         netdev_features_t features = 0;
17631
17632         printk_once(KERN_INFO "%s\n", version);
17633
17634         err = pci_enable_device(pdev);
17635         if (err) {
17636                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17637                 return err;
17638         }
17639
17640         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17641         if (err) {
17642                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17643                 goto err_out_disable_pdev;
17644         }
17645
17646         pci_set_master(pdev);
17647
17648         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17649         if (!dev) {
17650                 err = -ENOMEM;
17651                 goto err_out_free_res;
17652         }
17653
17654         SET_NETDEV_DEV(dev, &pdev->dev);
17655
17656         tp = netdev_priv(dev);
17657         tp->pdev = pdev;
17658         tp->dev = dev;
17659         tp->rx_mode = TG3_DEF_RX_MODE;
17660         tp->tx_mode = TG3_DEF_TX_MODE;
17661         tp->irq_sync = 1;
17662         tp->pcierr_recovery = false;
17663
17664         if (tg3_debug > 0)
17665                 tp->msg_enable = tg3_debug;
17666         else
17667                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17668
17669         if (pdev_is_ssb_gige_core(pdev)) {
17670                 tg3_flag_set(tp, IS_SSB_CORE);
17671                 if (ssb_gige_must_flush_posted_writes(pdev))
17672                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17673                 if (ssb_gige_one_dma_at_once(pdev))
17674                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17675                 if (ssb_gige_have_roboswitch(pdev)) {
17676                         tg3_flag_set(tp, USE_PHYLIB);
17677                         tg3_flag_set(tp, ROBOSWITCH);
17678                 }
17679                 if (ssb_gige_is_rgmii(pdev))
17680                         tg3_flag_set(tp, RGMII_MODE);
17681         }
17682
17683         /* The word/byte swap controls here control register access byte
17684          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17685          * setting below.
17686          */
17687         tp->misc_host_ctrl =
17688                 MISC_HOST_CTRL_MASK_PCI_INT |
17689                 MISC_HOST_CTRL_WORD_SWAP |
17690                 MISC_HOST_CTRL_INDIR_ACCESS |
17691                 MISC_HOST_CTRL_PCISTATE_RW;
17692
17693         /* The NONFRM (non-frame) byte/word swap controls take effect
17694          * on descriptor entries, anything which isn't packet data.
17695          *
17696          * The StrongARM chips on the board (one for tx, one for rx)
17697          * are running in big-endian mode.
17698          */
17699         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17700                         GRC_MODE_WSWAP_NONFRM_DATA);
17701 #ifdef __BIG_ENDIAN
17702         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17703 #endif
17704         spin_lock_init(&tp->lock);
17705         spin_lock_init(&tp->indirect_lock);
17706         INIT_WORK(&tp->reset_task, tg3_reset_task);
17707
17708         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17709         if (!tp->regs) {
17710                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17711                 err = -ENOMEM;
17712                 goto err_out_free_dev;
17713         }
17714
17715         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17716             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17717             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17719             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17720             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17729             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17730                 tg3_flag_set(tp, ENABLE_APE);
17731                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17732                 if (!tp->aperegs) {
17733                         dev_err(&pdev->dev,
17734                                 "Cannot map APE registers, aborting\n");
17735                         err = -ENOMEM;
17736                         goto err_out_iounmap;
17737                 }
17738         }
17739
17740         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17741         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17742
17743         dev->ethtool_ops = &tg3_ethtool_ops;
17744         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17745         dev->netdev_ops = &tg3_netdev_ops;
17746         dev->irq = pdev->irq;
17747
17748         err = tg3_get_invariants(tp, ent);
17749         if (err) {
17750                 dev_err(&pdev->dev,
17751                         "Problem fetching invariants of chip, aborting\n");
17752                 goto err_out_apeunmap;
17753         }
17754
17755         /* The EPB bridge inside 5714, 5715, and 5780 and any
17756          * device behind the EPB cannot support DMA addresses > 40-bit.
17757          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17758          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17759          * do DMA address check in tg3_start_xmit().
17760          */
17761         if (tg3_flag(tp, IS_5788))
17762                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17763         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17764                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17765 #ifdef CONFIG_HIGHMEM
17766                 dma_mask = DMA_BIT_MASK(64);
17767 #endif
17768         } else
17769                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17770
17771         /* Configure DMA attributes. */
17772         if (dma_mask > DMA_BIT_MASK(32)) {
17773                 err = pci_set_dma_mask(pdev, dma_mask);
17774                 if (!err) {
17775                         features |= NETIF_F_HIGHDMA;
17776                         err = pci_set_consistent_dma_mask(pdev,
17777                                                           persist_dma_mask);
17778                         if (err < 0) {
17779                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17780                                         "DMA for consistent allocations\n");
17781                                 goto err_out_apeunmap;
17782                         }
17783                 }
17784         }
17785         if (err || dma_mask == DMA_BIT_MASK(32)) {
17786                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17787                 if (err) {
17788                         dev_err(&pdev->dev,
17789                                 "No usable DMA configuration, aborting\n");
17790                         goto err_out_apeunmap;
17791                 }
17792         }
17793
17794         tg3_init_bufmgr_config(tp);
17795
17796         /* 5700 B0 chips do not support checksumming correctly due
17797          * to hardware bugs.
17798          */
17799         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17800                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17801
17802                 if (tg3_flag(tp, 5755_PLUS))
17803                         features |= NETIF_F_IPV6_CSUM;
17804         }
17805
17806         /* TSO is on by default on chips that support hardware TSO.
17807          * Firmware TSO on older chips gives lower performance, so it
17808          * is off by default, but can be enabled using ethtool.
17809          */
17810         if ((tg3_flag(tp, HW_TSO_1) ||
17811              tg3_flag(tp, HW_TSO_2) ||
17812              tg3_flag(tp, HW_TSO_3)) &&
17813             (features & NETIF_F_IP_CSUM))
17814                 features |= NETIF_F_TSO;
17815         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17816                 if (features & NETIF_F_IPV6_CSUM)
17817                         features |= NETIF_F_TSO6;
17818                 if (tg3_flag(tp, HW_TSO_3) ||
17819                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17820                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17821                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17822                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17823                     tg3_asic_rev(tp) == ASIC_REV_57780)
17824                         features |= NETIF_F_TSO_ECN;
17825         }
17826
17827         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17828                          NETIF_F_HW_VLAN_CTAG_RX;
17829         dev->vlan_features |= features;
17830
17831         /*
17832          * Add loopback capability only for a subset of devices that support
17833          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17834          * loopback for the remaining devices.
17835          */
17836         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17837             !tg3_flag(tp, CPMU_PRESENT))
17838                 /* Add the loopback capability */
17839                 features |= NETIF_F_LOOPBACK;
17840
17841         dev->hw_features |= features;
17842         dev->priv_flags |= IFF_UNICAST_FLT;
17843
17844         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17845             !tg3_flag(tp, TSO_CAPABLE) &&
17846             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17847                 tg3_flag_set(tp, MAX_RXPEND_64);
17848                 tp->rx_pending = 63;
17849         }
17850
17851         err = tg3_get_device_address(tp);
17852         if (err) {
17853                 dev_err(&pdev->dev,
17854                         "Could not obtain valid ethernet address, aborting\n");
17855                 goto err_out_apeunmap;
17856         }
17857
17858         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17859         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17860         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17861         for (i = 0; i < tp->irq_max; i++) {
17862                 struct tg3_napi *tnapi = &tp->napi[i];
17863
17864                 tnapi->tp = tp;
17865                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17866
17867                 tnapi->int_mbox = intmbx;
17868                 if (i <= 4)
17869                         intmbx += 0x8;
17870                 else
17871                         intmbx += 0x4;
17872
17873                 tnapi->consmbox = rcvmbx;
17874                 tnapi->prodmbox = sndmbx;
17875
17876                 if (i)
17877                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17878                 else
17879                         tnapi->coal_now = HOSTCC_MODE_NOW;
17880
17881                 if (!tg3_flag(tp, SUPPORT_MSIX))
17882                         break;
17883
17884                 /*
17885                  * If we support MSIX, we'll be using RSS.  If we're using
17886                  * RSS, the first vector only handles link interrupts and the
17887                  * remaining vectors handle rx and tx interrupts.  Reuse the
17888                  * mailbox values for the next iteration.  The values we setup
17889                  * above are still useful for the single vectored mode.
17890                  */
17891                 if (!i)
17892                         continue;
17893
17894                 rcvmbx += 0x8;
17895
17896                 if (sndmbx & 0x4)
17897                         sndmbx -= 0x4;
17898                 else
17899                         sndmbx += 0xc;
17900         }
17901
17902         /*
17903          * Reset chip in case UNDI or EFI driver did not shutdown
17904          * DMA self test will enable WDMAC and we'll see (spurious)
17905          * pending DMA on the PCI bus at that point.
17906          */
17907         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17908             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17909                 tg3_full_lock(tp, 0);
17910                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17911                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17912                 tg3_full_unlock(tp);
17913         }
17914
17915         err = tg3_test_dma(tp);
17916         if (err) {
17917                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17918                 goto err_out_apeunmap;
17919         }
17920
17921         tg3_init_coal(tp);
17922
17923         pci_set_drvdata(pdev, dev);
17924
17925         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17926             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17927             tg3_asic_rev(tp) == ASIC_REV_5762)
17928                 tg3_flag_set(tp, PTP_CAPABLE);
17929
17930         tg3_timer_init(tp);
17931
17932         tg3_carrier_off(tp);
17933
17934         err = register_netdev(dev);
17935         if (err) {
17936                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17937                 goto err_out_apeunmap;
17938         }
17939
17940         if (tg3_flag(tp, PTP_CAPABLE)) {
17941                 tg3_ptp_init(tp);
17942                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17943                                                    &tp->pdev->dev);
17944                 if (IS_ERR(tp->ptp_clock))
17945                         tp->ptp_clock = NULL;
17946         }
17947
17948         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17949                     tp->board_part_number,
17950                     tg3_chip_rev_id(tp),
17951                     tg3_bus_string(tp, str),
17952                     dev->dev_addr);
17953
17954         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17955                 struct phy_device *phydev;
17956                 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17957                 netdev_info(dev,
17958                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17959                             phydev->drv->name, dev_name(&phydev->dev));
17960         } else {
17961                 char *ethtype;
17962
17963                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17964                         ethtype = "10/100Base-TX";
17965                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17966                         ethtype = "1000Base-SX";
17967                 else
17968                         ethtype = "10/100/1000Base-T";
17969
17970                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17971                             "(WireSpeed[%d], EEE[%d])\n",
17972                             tg3_phy_string(tp), ethtype,
17973                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17974                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17975         }
17976
17977         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17978                     (dev->features & NETIF_F_RXCSUM) != 0,
17979                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17980                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17981                     tg3_flag(tp, ENABLE_ASF) != 0,
17982                     tg3_flag(tp, TSO_CAPABLE) != 0);
17983         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17984                     tp->dma_rwctrl,
17985                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17986                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17987
17988         pci_save_state(pdev);
17989
17990         return 0;
17991
17992 err_out_apeunmap:
17993         if (tp->aperegs) {
17994                 iounmap(tp->aperegs);
17995                 tp->aperegs = NULL;
17996         }
17997
17998 err_out_iounmap:
17999         if (tp->regs) {
18000                 iounmap(tp->regs);
18001                 tp->regs = NULL;
18002         }
18003
18004 err_out_free_dev:
18005         free_netdev(dev);
18006
18007 err_out_free_res:
18008         pci_release_regions(pdev);
18009
18010 err_out_disable_pdev:
18011         if (pci_is_enabled(pdev))
18012                 pci_disable_device(pdev);
18013         return err;
18014 }
18015
18016 static void tg3_remove_one(struct pci_dev *pdev)
18017 {
18018         struct net_device *dev = pci_get_drvdata(pdev);
18019
18020         if (dev) {
18021                 struct tg3 *tp = netdev_priv(dev);
18022
18023                 tg3_ptp_fini(tp);
18024
18025                 release_firmware(tp->fw);
18026
18027                 tg3_reset_task_cancel(tp);
18028
18029                 if (tg3_flag(tp, USE_PHYLIB)) {
18030                         tg3_phy_fini(tp);
18031                         tg3_mdio_fini(tp);
18032                 }
18033
18034                 unregister_netdev(dev);
18035                 if (tp->aperegs) {
18036                         iounmap(tp->aperegs);
18037                         tp->aperegs = NULL;
18038                 }
18039                 if (tp->regs) {
18040                         iounmap(tp->regs);
18041                         tp->regs = NULL;
18042                 }
18043                 free_netdev(dev);
18044                 pci_release_regions(pdev);
18045                 pci_disable_device(pdev);
18046         }
18047 }
18048
18049 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback (dev_pm_ops, CONFIG_PM_SLEEP only).
 * Quiesces the interface and prepares the chip for power-down.  If
 * power-down preparation fails, the device is restarted so it remains
 * usable, and the original error is still returned to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		goto unlock;

	/* Stop deferred work and traffic before touching the hardware. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip under the full lock; INIT_COMPLETE is cleared so
	 * resume knows a full re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Suspend failed: bring the device back up so the system
		 * is left in a working state.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
18106
/* System-sleep resume callback (dev_pm_ops, CONFIG_PM_SLEEP only).
 * Re-initializes the hardware and restarts traffic for an interface
 * that was running when tg3_suspend() quiesced it.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* The interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* Skip the PHY reset during restart when the link was deliberately
	 * kept up across the power-down (e.g. for WoL).
	 */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
18145 #endif /* CONFIG_PM_SLEEP */
18146
18147 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18148
18149 static void tg3_shutdown(struct pci_dev *pdev)
18150 {
18151         struct net_device *dev = pci_get_drvdata(pdev);
18152         struct tg3 *tp = netdev_priv(dev);
18153
18154         rtnl_lock();
18155         netif_device_detach(dev);
18156
18157         if (netif_running(dev))
18158                 dev_close(dev);
18159
18160         if (system_state == SYSTEM_POWER_OFF)
18161                 tg3_power_down(tp);
18162
18163         rtnl_unlock();
18164 }
18165
18166 /**
18167  * tg3_io_error_detected - called when PCI error is detected
18168  * @pdev: Pointer to PCI device
18169  * @state: The current pci connection state
18170  *
18171  * This function is called after a PCI bus error affecting
18172  * this device has been detected.
18173  */
18174 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18175                                               pci_channel_state_t state)
18176 {
18177         struct net_device *netdev = pci_get_drvdata(pdev);
18178         struct tg3 *tp = netdev_priv(netdev);
18179         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18180
18181         netdev_info(netdev, "PCI I/O error detected\n");
18182
18183         rtnl_lock();
18184
18185         /* Could be second call or maybe we don't have netdev yet */
18186         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18187                 goto done;
18188
18189         /* We needn't recover from permanent error */
18190         if (state == pci_channel_io_frozen)
18191                 tp->pcierr_recovery = true;
18192
18193         tg3_phy_stop(tp);
18194
18195         tg3_netif_stop(tp);
18196
18197         tg3_timer_stop(tp);
18198
18199         /* Want to make sure that the reset task doesn't run */
18200         tg3_reset_task_cancel(tp);
18201
18202         netif_device_detach(netdev);
18203
18204         /* Clean up software state, even if MMIO is blocked */
18205         tg3_full_lock(tp, 0);
18206         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18207         tg3_full_unlock(tp);
18208
18209 done:
18210         if (state == pci_channel_io_perm_failure) {
18211                 if (netdev) {
18212                         tg3_napi_enable(tp);
18213                         dev_close(netdev);
18214                 }
18215                 err = PCI_ERS_RESULT_DISCONNECT;
18216         } else {
18217                 pci_disable_device(pdev);
18218         }
18219
18220         rtnl_unlock();
18221
18222         return err;
18223 }
18224
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	/* NOTE(review): netdev_priv() is computed before the !netdev check
	 * below; it is pointer arithmetic only (no dereference), and tp is
	 * not used unless netdev is valid.
	 */
	struct tg3 *tp = netdev_priv(netdev);
	/* Default to DISCONNECT; only flip to RECOVERED on success paths. */
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space saved at probe time, then re-save so a
	 * subsequent reset starts from this known-good state.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface not up (or gone): nothing more to restart. */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	/* On failure with a running interface, close it so the stack
	 * does not keep using a dead device.
	 */
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
18273
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that its OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	/* Interface not up: just clear the recovery flag and return. */
	if (!netdev || !netif_running(netdev))
		goto done;

	/* Full hardware re-init under the full lock, mirroring resume. */
	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	/* Recovery is complete (or abandoned); allow future AER events
	 * to be processed again.
	 */
	tp->pcierr_recovery = false;
	rtnl_unlock();
}
18316
/* PCI AER recovery hooks: quiesce on error, re-init after slot reset,
 * restart traffic on resume.
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18322
/* PCI driver registration: probe/remove, PM ops, AER handlers and
 * the shutdown hook all come together here.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

/* Generates module init/exit that register/unregister tg3_driver. */
module_pci_driver(tg3_driver);