GNU Linux-libre 4.9.326-gnu1 — drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
/*(DEBLOBBED)*/


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

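/* The helpers above treat tp->tg3_flags as a bitmap: the token pasting in
 * tg3_flag(tp, FOO) expands to an atomic test/set/clear of TG3_FLAG_FOO,
 * so feature checks stay cheap and safe under concurrency.
 */
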
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
#define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
#define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*(DEBLOBBED)*/

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

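/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is moved
 * through TG3PCI_REG_DATA, with indirect_lock serializing the pair of
 * config cycles.
 */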
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

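/* NIC on-board SRAM is reached through the memory window registers
 * (TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA), either via PCI config
 * cycles (SRAM_USE_CONFIG) or via flushed MMIO writes.  The 5906 skips
 * the statistics block range entirely; see the early returns below.
 */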
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

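/* APE hardware semaphores: the driver requests a lock by writing its bit
 * to the per-lock REQ register and owns the lock once that same bit reads
 * back from the GRANT register.  tg3_ape_lock() polls for up to 1 ms and
 * revokes the request if the grant never arrives.
 */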
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

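/* Wait, under the MEM semaphore, until the APE no longer has an event
 * marked pending.  On success the caller is left holding the MEM lock
 * and must release it with tg3_ape_unlock().
 */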
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

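/* NCSI scratchpad read handshake: the driver posts a base offset and
 * length into the APE shared message buffer, raises APE_EVENT_1, waits
 * for the firmware to service the event, then copies the result out of
 * the message area one 32-bit word at a time.
 */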
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

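/* Interrupt mailboxes: writing 1 to a vector's mailbox masks it, while
 * writing last_tag << 24 acknowledges status-block updates through that
 * tag and re-arms the vector.
 */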
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

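/* Sequence the core clock through CLOCK_CTRL_ALTCLK and back via
 * TG3PCI_CLOCK_CTRL, waiting 40 usec after each write since clock
 * frequency changes make an immediate read-back unsafe (see the
 * usec_wait comment above _tw32_flush).
 */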
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

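/* MII management access: the PHY and register addresses are packed into
 * a frame written to MAC_MI_COM, then MI_COM_BUSY is polled (up to
 * PHY_BUSY_LOOPS iterations of 10 usec) until the serial transaction
 * completes.  Auto-polling is paused around the access and restored
 * afterwards.
 */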
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

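/* Clause-45 MMD registers are reached indirectly through the clause-22
 * MMD access control/address registers: select the devad, latch the
 * register address, switch to no-post-increment data mode, then move
 * the data word.
 */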
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

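/* On the 5785, the MAC-side PHY configuration (LED modes, RGMII in-band
 * status and clock timeouts) is keyed off the attached PHY's driver ID,
 * so it is reprogrammed whenever the MDIO bus (re)starts.
 */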
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489         int i;
1490         u32 reg;
1491         struct phy_device *phydev;
1492
1493         if (tg3_flag(tp, 5717_PLUS)) {
1494                 u32 is_serdes;
1495
1496                 tp->phy_addr = tp->pci_fn + 1;
1497
1498                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500                 else
1501                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1503                 if (is_serdes)
1504                         tp->phy_addr += 7;
1505         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1506                 int addr;
1507
1508                 addr = ssb_gige_get_phyaddr(tp->pdev);
1509                 if (addr < 0)
1510                         return addr;
1511                 tp->phy_addr = addr;
1512         } else
1513                 tp->phy_addr = TG3_PHY_MII_ADDR;
1514
1515         tg3_mdio_start(tp);
1516
1517         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1518                 return 0;
1519
1520         tp->mdio_bus = mdiobus_alloc();
1521         if (tp->mdio_bus == NULL)
1522                 return -ENOMEM;
1523
1524         tp->mdio_bus->name     = "tg3 mdio bus";
1525         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1526                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1527         tp->mdio_bus->priv     = tp;
1528         tp->mdio_bus->parent   = &tp->pdev->dev;
1529         tp->mdio_bus->read     = &tg3_mdio_read;
1530         tp->mdio_bus->write    = &tg3_mdio_write;
1531         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1532
1533         /* The bus registration will look for all the PHYs on the mdio bus.
1534          * Unfortunately, it does not ensure the PHY is powered up before
1535          * accessing the PHY ID registers.  A chip reset is the
1536          * quickest way to bring the device back to an operational state.
1537          */
1538         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1539                 tg3_bmcr_reset(tp);
1540
1541         i = mdiobus_register(tp->mdio_bus);
1542         if (i) {
1543                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1544                 mdiobus_free(tp->mdio_bus);
1545                 return i;
1546         }
1547
1548         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1549
1550         if (!phydev || !phydev->drv) {
1551                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1552                 mdiobus_unregister(tp->mdio_bus);
1553                 mdiobus_free(tp->mdio_bus);
1554                 return -ENODEV;
1555         }
1556
1557         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1558         case PHY_ID_BCM57780:
1559                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1560                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1561                 break;
1562         case PHY_ID_BCM50610:
1563         case PHY_ID_BCM50610M:
1564                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1565                                      PHY_BRCM_RX_REFCLK_UNUSED |
1566                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1567                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1568                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1569                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1570                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1571                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1572                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1573                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1574                 /* fallthru */
1575         case PHY_ID_RTL8211C:
1576                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1577                 break;
1578         case PHY_ID_RTL8201E:
1579         case PHY_ID_BCMAC131:
1580                 phydev->interface = PHY_INTERFACE_MODE_MII;
1581                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1582                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1583                 break;
1584         }
1585
1586         tg3_flag_set(tp, MDIOBUS_INITED);
1587
1588         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1589                 tg3_mdio_config_5785(tp);
1590
1591         return 0;
1592 }
1593
1594 static void tg3_mdio_fini(struct tg3 *tp)
1595 {
1596         if (tg3_flag(tp, MDIOBUS_INITED)) {
1597                 tg3_flag_clear(tp, MDIOBUS_INITED);
1598                 mdiobus_unregister(tp->mdio_bus);
1599                 mdiobus_free(tp->mdio_bus);
1600         }
1601 }
1602
1603 /* tp->lock is held. */
1604 static inline void tg3_generate_fw_event(struct tg3 *tp)
1605 {
1606         u32 val;
1607
1608         val = tr32(GRC_RX_CPU_EVENT);
1609         val |= GRC_RX_CPU_DRIVER_EVENT;
1610         tw32_f(GRC_RX_CPU_EVENT, val);
1611
1612         tp->last_event_jiffies = jiffies;
1613 }
1614
1615 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1616
1617 /* tp->lock is held. */
1618 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 {
1620         int i;
1621         unsigned int delay_cnt;
1622         long time_remain;
1623
1624         /* If enough time has passed, no wait is necessary. */
1625         time_remain = (long)(tp->last_event_jiffies + 1 +
1626                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1627                       (long)jiffies;
1628         if (time_remain < 0)
1629                 return;
1630
1631         /* Check if we can shorten the wait time. */
1632         delay_cnt = jiffies_to_usecs(time_remain);
1633         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1634                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
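        /* The loop below calls udelay(8) per iteration, so convert the
         * remaining microseconds into a poll count, rounding up.
         */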
1635         delay_cnt = (delay_cnt >> 3) + 1;
1636
1637         for (i = 0; i < delay_cnt; i++) {
1638                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1639                         break;
1640                 if (pci_channel_offline(tp->pdev))
1641                         break;
1642
1643                 udelay(8);
1644         }
1645 }
1646
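/* Gather the current MII state into the four-word layout used by the
 * UMP link report: (BMCR << 16 | BMSR), (ADVERTISE << 16 | LPA),
 * (CTRL1000 << 16 | STAT1000) for copper PHYs, and PHYADDR << 16.
 */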
1647 /* tp->lock is held. */
1648 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1649 {
1650         u32 reg, val;
1651
1652         val = 0;
1653         if (!tg3_readphy(tp, MII_BMCR, &reg))
1654                 val = reg << 16;
1655         if (!tg3_readphy(tp, MII_BMSR, &reg))
1656                 val |= (reg & 0xffff);
1657         *data++ = val;
1658
1659         val = 0;
1660         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1661                 val = reg << 16;
1662         if (!tg3_readphy(tp, MII_LPA, &reg))
1663                 val |= (reg & 0xffff);
1664         *data++ = val;
1665
1666         val = 0;
1667         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1668                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1669                         val = reg << 16;
1670                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1671                         val |= (reg & 0xffff);
1672         }
1673         *data++ = val;
1674
1675         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1676                 val = reg << 16;
1677         else
1678                 val = 0;
1679         *data++ = val;
1680 }
1681
1682 /* tp->lock is held. */
1683 static void tg3_ump_link_report(struct tg3 *tp)
1684 {
1685         u32 data[4];
1686
1687         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1688                 return;
1689
1690         tg3_phy_gather_ump_data(tp, data);
1691
1692         tg3_wait_for_event_ack(tp);
1693
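        /* Hand the snapshot to the firmware: command word, payload
         * length, the four data words, then ring the event doorbell.
         */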
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1698         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1699         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1700
1701         tg3_generate_fw_event(tp);
1702 }
1703
1704 /* tp->lock is held. */
1705 static void tg3_stop_fw(struct tg3 *tp)
1706 {
1707         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1708                 /* Wait for RX cpu to ACK the previous event. */
1709                 tg3_wait_for_event_ack(tp);
1710
1711                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1712
1713                 tg3_generate_fw_event(tp);
1714
1715                 /* Wait for RX cpu to ACK this event. */
1716                 tg3_wait_for_event_ack(tp);
1717         }
1718 }
1719
1720 /* tp->lock is held. */
1721 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1722 {
1723         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1724                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1725
1726         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1727                 switch (kind) {
1728                 case RESET_KIND_INIT:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_START);
1731                         break;
1732
1733                 case RESET_KIND_SHUTDOWN:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_UNLOAD);
1736                         break;
1737
1738                 case RESET_KIND_SUSPEND:
1739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740                                       DRV_STATE_SUSPEND);
1741                         break;
1742
1743                 default:
1744                         break;
1745                 }
1746         }
1747 }
1748
1749 /* tp->lock is held. */
1750 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 {
1752         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753                 switch (kind) {
1754                 case RESET_KIND_INIT:
1755                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756                                       DRV_STATE_START_DONE);
1757                         break;
1758
1759                 case RESET_KIND_SHUTDOWN:
1760                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761                                       DRV_STATE_UNLOAD_DONE);
1762                         break;
1763
1764                 default:
1765                         break;
1766                 }
1767         }
1768 }
1769
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1772 {
1773         if (tg3_flag(tp, ENABLE_ASF)) {
1774                 switch (kind) {
1775                 case RESET_KIND_INIT:
1776                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777                                       DRV_STATE_START);
1778                         break;
1779
1780                 case RESET_KIND_SHUTDOWN:
1781                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782                                       DRV_STATE_UNLOAD);
1783                         break;
1784
1785                 case RESET_KIND_SUSPEND:
1786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787                                       DRV_STATE_SUSPEND);
1788                         break;
1789
1790                 default:
1791                         break;
1792                 }
1793         }
1794 }
1795
1796 static int tg3_poll_fw(struct tg3 *tp)
1797 {
1798         int i;
1799         u32 val;
1800
1801         if (tg3_flag(tp, NO_FWARE_REPORTED))
1802                 return 0;
1803
1804         if (tg3_flag(tp, IS_SSB_CORE)) {
1805                 /* We don't use firmware. */
1806                 return 0;
1807         }
1808
1809         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1810                 /* Wait up to 20ms for init done. */
1811                 for (i = 0; i < 200; i++) {
1812                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1813                                 return 0;
1814                         if (pci_channel_offline(tp->pdev))
1815                                 return -ENODEV;
1816
1817                         udelay(100);
1818                 }
1819                 return -ENODEV;
1820         }
1821
1822         /* Wait for firmware initialization to complete. */
1823         for (i = 0; i < 100000; i++) {
1824                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1825                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826                         break;
1827                 if (pci_channel_offline(tp->pdev)) {
1828                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1829                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1830                                 netdev_info(tp->dev, "No firmware running\n");
1831                         }
1832
1833                         break;
1834                 }
1835
1836                 udelay(10);
1837         }
1838
1839         /* Chip might not be fitted with firmware.  Some Sun onboard
1840          * parts are configured like that.  So don't signal the timeout
1841          * of the above loop as an error, but do report the lack of
1842          * running firmware once.
1843          */
1844         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1845                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1846
1847                 netdev_info(tp->dev, "No firmware running\n");
1848         }
1849
1850         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1851                 /* The 57765 A0 needs a little more
1852                  * time to do some important work.
1853                  */
1854                 mdelay(10);
1855         }
1856
1857         return 0;
1858 }
1859
1860 static void tg3_link_report(struct tg3 *tp)
1861 {
1862         if (!netif_carrier_ok(tp->dev)) {
1863                 netif_info(tp, link, tp->dev, "Link is down\n");
1864                 tg3_ump_link_report(tp);
1865         } else if (netif_msg_link(tp)) {
1866                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1867                             (tp->link_config.active_speed == SPEED_1000 ?
1868                              1000 :
1869                              (tp->link_config.active_speed == SPEED_100 ?
1870                               100 : 10)),
1871                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1872                              "full" : "half"));
1873
1874                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1875                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1876                             "on" : "off",
1877                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1878                             "on" : "off");
1879
1880                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1881                         netdev_info(tp->dev, "EEE is %s\n",
1882                                     tp->setlpicnt ? "enabled" : "disabled");
1883
1884                 tg3_ump_link_report(tp);
1885         }
1886
1887         tp->link_up = netif_carrier_ok(tp->dev);
1888 }
1889
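/* Convert copper (1000BASE-T) pause advertisement bits to FLOW_CTRL_*
 * flags: PAUSE_CAP alone means symmetric pause, PAUSE_CAP plus
 * PAUSE_ASYM means receive-only, and PAUSE_ASYM alone means
 * transmit-only.
 */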
1890 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1891 {
1892         u32 flowctrl = 0;
1893
1894         if (adv & ADVERTISE_PAUSE_CAP) {
1895                 flowctrl |= FLOW_CTRL_RX;
1896                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1897                         flowctrl |= FLOW_CTRL_TX;
1898         } else if (adv & ADVERTISE_PAUSE_ASYM)
1899                 flowctrl |= FLOW_CTRL_TX;
1900
1901         return flowctrl;
1902 }
1903
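/* Map FLOW_CTRL_* flags to the 1000BASE-X pause advertisement bits
 * (the inverse of tg3_decode_flowctrl_1000X() below).
 */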
1904 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1905 {
1906         u16 miireg;
1907
1908         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1909                 miireg = ADVERTISE_1000XPAUSE;
1910         else if (flow_ctrl & FLOW_CTRL_TX)
1911                 miireg = ADVERTISE_1000XPSE_ASYM;
1912         else if (flow_ctrl & FLOW_CTRL_RX)
1913                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1914         else
1915                 miireg = 0;
1916
1917         return miireg;
1918 }
1919
1920 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1921 {
1922         u32 flowctrl = 0;
1923
1924         if (adv & ADVERTISE_1000XPAUSE) {
1925                 flowctrl |= FLOW_CTRL_RX;
1926                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1927                         flowctrl |= FLOW_CTRL_TX;
1928         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1929                 flowctrl |= FLOW_CTRL_TX;
1930
1931         return flowctrl;
1932 }
1933
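/* Resolve local and link-partner 1000BASE-X pause advertisements into
 * the flow control to use: symmetric pause when both sides advertise
 * PAUSE, otherwise one-directional pause when both advertise ASYM and
 * exactly one side also advertises PAUSE.
 */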
1934 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1935 {
1936         u8 cap = 0;
1937
1938         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1939                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1940         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1941                 if (lcladv & ADVERTISE_1000XPAUSE)
1942                         cap = FLOW_CTRL_RX;
1943                 if (rmtadv & ADVERTISE_1000XPAUSE)
1944                         cap = FLOW_CTRL_TX;
1945         }
1946
1947         return cap;
1948 }
1949
1950 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1951 {
1952         u8 autoneg;
1953         u8 flowctrl = 0;
1954         u32 old_rx_mode = tp->rx_mode;
1955         u32 old_tx_mode = tp->tx_mode;
1956
1957         if (tg3_flag(tp, USE_PHYLIB))
1958                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1959         else
1960                 autoneg = tp->link_config.autoneg;
1961
1962         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1963                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1964                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1965                 else
1966                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1967         } else
1968                 flowctrl = tp->link_config.flowctrl;
1969
1970         tp->link_config.active_flowctrl = flowctrl;
1971
1972         if (flowctrl & FLOW_CTRL_RX)
1973                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1974         else
1975                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1976
1977         if (old_rx_mode != tp->rx_mode)
1978                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1979
1980         if (flowctrl & FLOW_CTRL_TX)
1981                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1982         else
1983                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1984
1985         if (old_tx_mode != tp->tx_mode)
1986                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1987 }
1988
1989 static void tg3_adjust_link(struct net_device *dev)
1990 {
1991         u8 oldflowctrl, linkmesg = 0;
1992         u32 mac_mode, lcl_adv, rmt_adv;
1993         struct tg3 *tp = netdev_priv(dev);
1994         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1995
1996         spin_lock_bh(&tp->lock);
1997
1998         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1999                                     MAC_MODE_HALF_DUPLEX);
2000
2001         oldflowctrl = tp->link_config.active_flowctrl;
2002
2003         if (phydev->link) {
2004                 lcl_adv = 0;
2005                 rmt_adv = 0;
2006
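                /* Select the MAC port mode from the PHY speed; the
                 * 5785 must fall back to MII whenever the speed is
                 * not a known 10/100/1000 value.
                 */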
2007                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2008                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2009                 else if (phydev->speed == SPEED_1000 ||
2010                          tg3_asic_rev(tp) != ASIC_REV_5785)
2011                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2012                 else
2013                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2014
2015                 if (phydev->duplex == DUPLEX_HALF)
2016                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2017                 else {
2018                         lcl_adv = mii_advertise_flowctrl(
2019                                   tp->link_config.flowctrl);
2020
2021                         if (phydev->pause)
2022                                 rmt_adv = LPA_PAUSE_CAP;
2023                         if (phydev->asym_pause)
2024                                 rmt_adv |= LPA_PAUSE_ASYM;
2025                 }
2026
2027                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2028         } else
2029                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2030
2031         if (mac_mode != tp->mac_mode) {
2032                 tp->mac_mode = mac_mode;
2033                 tw32_f(MAC_MODE, tp->mac_mode);
2034                 udelay(40);
2035         }
2036
2037         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2038                 if (phydev->speed == SPEED_10)
2039                         tw32(MAC_MI_STAT,
2040                              MAC_MI_STAT_10MBPS_MODE |
2041                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2042                 else
2043                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2044         }
2045
2046         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2047                 tw32(MAC_TX_LENGTHS,
2048                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2049                       (6 << TX_LENGTHS_IPG_SHIFT) |
2050                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2051         else
2052                 tw32(MAC_TX_LENGTHS,
2053                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2054                       (6 << TX_LENGTHS_IPG_SHIFT) |
2055                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2056
2057         if (phydev->link != tp->old_link ||
2058             phydev->speed != tp->link_config.active_speed ||
2059             phydev->duplex != tp->link_config.active_duplex ||
2060             oldflowctrl != tp->link_config.active_flowctrl)
2061                 linkmesg = 1;
2062
2063         tp->old_link = phydev->link;
2064         tp->link_config.active_speed = phydev->speed;
2065         tp->link_config.active_duplex = phydev->duplex;
2066
2067         spin_unlock_bh(&tp->lock);
2068
2069         if (linkmesg)
2070                 tg3_link_report(tp);
2071 }
2072
2073 static int tg3_phy_init(struct tg3 *tp)
2074 {
2075         struct phy_device *phydev;
2076
2077         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2078                 return 0;
2079
2080         /* Bring the PHY back to a known state. */
2081         tg3_bmcr_reset(tp);
2082
2083         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2084
2085         /* Attach the MAC to the PHY. */
2086         phydev = phy_connect(tp->dev, phydev_name(phydev),
2087                              tg3_adjust_link, phydev->interface);
2088         if (IS_ERR(phydev)) {
2089                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2090                 return PTR_ERR(phydev);
2091         }
2092
2093         /* Mask with MAC supported features. */
2094         switch (phydev->interface) {
2095         case PHY_INTERFACE_MODE_GMII:
2096         case PHY_INTERFACE_MODE_RGMII:
2097                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2098                         phydev->supported &= (PHY_GBIT_FEATURES |
2099                                               SUPPORTED_Pause |
2100                                               SUPPORTED_Asym_Pause);
2101                         break;
2102                 }
2103                 /* fallthru */
2104         case PHY_INTERFACE_MODE_MII:
2105                 phydev->supported &= (PHY_BASIC_FEATURES |
2106                                       SUPPORTED_Pause |
2107                                       SUPPORTED_Asym_Pause);
2108                 break;
2109         default:
2110                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2111                 return -EINVAL;
2112         }
2113
2114         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2115
2116         phydev->advertising = phydev->supported;
2117
2118         phy_attached_info(phydev);
2119
2120         return 0;
2121 }
2122
2123 static void tg3_phy_start(struct tg3 *tp)
2124 {
2125         struct phy_device *phydev;
2126
2127         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2128                 return;
2129
2130         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2131
2132         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2133                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2134                 phydev->speed = tp->link_config.speed;
2135                 phydev->duplex = tp->link_config.duplex;
2136                 phydev->autoneg = tp->link_config.autoneg;
2137                 phydev->advertising = tp->link_config.advertising;
2138         }
2139
2140         phy_start(phydev);
2141
2142         phy_start_aneg(phydev);
2143 }
2144
2145 static void tg3_phy_stop(struct tg3 *tp)
2146 {
2147         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2148                 return;
2149
2150         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2151 }
2152
2153 static void tg3_phy_fini(struct tg3 *tp)
2154 {
2155         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2156                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2157                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2158         }
2159 }
2160
2161 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2162 {
2163         int err;
2164         u32 val;
2165
2166         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2167                 return 0;
2168
2169         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2170                 /* Cannot do read-modify-write on 5401 */
2171                 err = tg3_phy_auxctl_write(tp,
2172                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2173                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2174                                            0x4c20);
2175                 goto done;
2176         }
2177
2178         err = tg3_phy_auxctl_read(tp,
2179                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2180         if (err)
2181                 return err;
2182
2183         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2184         err = tg3_phy_auxctl_write(tp,
2185                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2186
2187 done:
2188         return err;
2189 }
2190
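/* FET PHYs keep the auto power-down control behind a shadow page:
 * enable shadow access via MII_TG3_FET_TEST, flip the APD bit in
 * AUXSTAT2, then restore the original test register value.
 */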
2191 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2192 {
2193         u32 phytest;
2194
2195         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2196                 u32 phy;
2197
2198                 tg3_writephy(tp, MII_TG3_FET_TEST,
2199                              phytest | MII_TG3_FET_SHADOW_EN);
2200                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2201                         if (enable)
2202                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2203                         else
2204                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2205                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2206                 }
2207                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2208         }
2209 }
2210
2211 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2212 {
2213         u32 reg;
2214
2215         if (!tg3_flag(tp, 5705_PLUS) ||
2216             (tg3_flag(tp, 5717_PLUS) &&
2217              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2218                 return;
2219
2220         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2221                 tg3_phy_fet_toggle_apd(tp, enable);
2222                 return;
2223         }
2224
2225         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2226               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2227               MII_TG3_MISC_SHDW_SCR5_SDTL |
2228               MII_TG3_MISC_SHDW_SCR5_C125OE;
2229         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2230                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2231
2232         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2233
2235         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2236         if (enable)
2237                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2238
2239         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2240 }
2241
2242 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2243 {
2244         u32 phy;
2245
2246         if (!tg3_flag(tp, 5705_PLUS) ||
2247             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2248                 return;
2249
2250         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2251                 u32 ephy;
2252
2253                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2254                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2255
2256                         tg3_writephy(tp, MII_TG3_FET_TEST,
2257                                      ephy | MII_TG3_FET_SHADOW_EN);
2258                         if (!tg3_readphy(tp, reg, &phy)) {
2259                                 if (enable)
2260                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2261                                 else
2262                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2263                                 tg3_writephy(tp, reg, phy);
2264                         }
2265                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2266                 }
2267         } else {
2268                 int ret;
2269
2270                 ret = tg3_phy_auxctl_read(tp,
2271                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2272                 if (!ret) {
2273                         if (enable)
2274                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2275                         else
2276                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2277                         tg3_phy_auxctl_write(tp,
2278                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2279                 }
2280         }
2281 }
2282
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285         int ret;
2286         u32 val;
2287
2288         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289                 return;
2290
2291         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292         if (!ret)
2293                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296
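/* Distribute the factory OTP calibration word into the PHY DSP taps
 * (AGC target, filter trims, VDAC, 10BASE-T amplitude).  The AUXCTL
 * SMDSP block must be opened for the duration of the writes.
 */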
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2298 {
2299         u32 otp, phy;
2300
2301         if (!tp->phy_otp)
2302                 return;
2303
2304         otp = tp->phy_otp;
2305
2306         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2307                 return;
2308
2309         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2312
2313         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2316
2317         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2320
2321         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2323
2324         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2326
2327         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2330
2331         tg3_phy_toggle_auxctl_smdsp(tp, false);
2332 }
2333
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2335 {
2336         u32 val;
2337         struct ethtool_eee *dest = &tp->eee;
2338
2339         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2340                 return;
2341
2342         if (eee)
2343                 dest = eee;
2344
2345         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2346                 return;
2347
2348         /* Pull eee_active */
2349         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351                 dest->eee_active = 1;
2352         } else
2353                 dest->eee_active = 0;
2354
2355         /* Pull lp advertised settings */
2356         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2357                 return;
2358         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359
2360         /* Pull advertised and eee_enabled settings */
2361         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2362                 return;
2363         dest->eee_enabled = !!val;
2364         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365
2366         /* Pull tx_lpi_enabled */
2367         val = tr32(TG3_CPMU_EEE_MODE);
2368         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2369
2370         /* Pull lpi timer value */
2371         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2372 }
2373
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2375 {
2376         u32 val;
2377
2378         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2379                 return;
2380
2381         tp->setlpicnt = 0;
2382
2383         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2384             current_link_up &&
2385             tp->link_config.active_duplex == DUPLEX_FULL &&
2386             (tp->link_config.active_speed == SPEED_100 ||
2387              tp->link_config.active_speed == SPEED_1000)) {
2388                 u32 eeectl;
2389
2390                 if (tp->link_config.active_speed == SPEED_1000)
2391                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2392                 else
2393                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2394
2395                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2396
2397                 tg3_eee_pull_config(tp, NULL);
2398                 if (tp->eee.eee_active)
2399                         tp->setlpicnt = 2;
2400         }
2401
2402         if (!tp->setlpicnt) {
2403                 if (current_link_up &&
2404                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2407                 }
2408
2409                 val = tr32(TG3_CPMU_EEE_MODE);
2410                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2411         }
2412 }
2413
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2415 {
2416         u32 val;
2417
2418         if (tp->link_config.active_speed == SPEED_1000 &&
2419             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421              tg3_flag(tp, 57765_CLASS)) &&
2422             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423                 val = MII_TG3_DSP_TAP26_ALNOKO |
2424                       MII_TG3_DSP_TAP26_RMRXSTO;
2425                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2427         }
2428
2429         val = tr32(TG3_CPMU_EEE_MODE);
2430         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2431 }
2432
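/* Poll (up to 100 reads) for the DSP "macro busy" bit (0x1000) in
 * MII_TG3_DSP_CONTROL to clear.
 */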
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2434 {
2435         int limit = 100;
2436
2437         while (limit--) {
2438                 u32 tmp32;
2439
2440                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441                         if ((tmp32 & 0x1000) == 0)
2442                                 break;
2443                 }
2444         }
2445         if (limit < 0)
2446                 return -EBUSY;
2447
2448         return 0;
2449 }
2450
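/* Write a known test pattern into each of the four PHY channels and
 * read it back through the DSP port.  A macro timeout requests a
 * fresh PHY reset via *resetp; both timeouts and readback mismatches
 * (low 15 bits, upper 4 bits) fail with -EBUSY.
 */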
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2452 {
2453         static const u32 test_pat[4][6] = {
2454         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2458         };
2459         int chan;
2460
2461         for (chan = 0; chan < 4; chan++) {
2462                 int i;
2463
2464                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465                              (chan * 0x2000) | 0x0200);
2466                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2467
2468                 for (i = 0; i < 6; i++)
2469                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2470                                      test_pat[chan][i]);
2471
2472                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473                 if (tg3_wait_macro_done(tp)) {
2474                         *resetp = 1;
2475                         return -EBUSY;
2476                 }
2477
2478                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479                              (chan * 0x2000) | 0x0200);
2480                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481                 if (tg3_wait_macro_done(tp)) {
2482                         *resetp = 1;
2483                         return -EBUSY;
2484                 }
2485
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487                 if (tg3_wait_macro_done(tp)) {
2488                         *resetp = 1;
2489                         return -EBUSY;
2490                 }
2491
2492                 for (i = 0; i < 6; i += 2) {
2493                         u32 low, high;
2494
2495                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497                             tg3_wait_macro_done(tp)) {
2498                                 *resetp = 1;
2499                                 return -EBUSY;
2500                         }
2501                         low &= 0x7fff;
2502                         high &= 0x000f;
2503                         if (low != test_pat[chan][i] ||
2504                             high != test_pat[chan][i+1]) {
2505                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2508
2509                                 return -EBUSY;
2510                         }
2511                 }
2512         }
2513
2514         return 0;
2515 }
2516
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2518 {
2519         int chan;
2520
2521         for (chan = 0; chan < 4; chan++) {
2522                 int i;
2523
2524                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525                              (chan * 0x2000) | 0x0200);
2526                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527                 for (i = 0; i < 6; i++)
2528                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530                 if (tg3_wait_macro_done(tp))
2531                         return -EBUSY;
2532         }
2533
2534         return 0;
2535 }
2536
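/* Workaround for 5703/4/5 PHYs: force 1000 Mbps full-duplex master
 * mode with the transmitter and interrupt blocked, then rewrite and
 * verify the channel test pattern (retrying with a fresh BMCR reset
 * up to ten times) before restoring MII_CTRL1000 and MII_TG3_EXT_CTRL.
 */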
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539         u32 reg32, phy9_orig;
2540         int retries, do_phy_reset, err;
2541
2542         retries = 10;
2543         do_phy_reset = 1;
2544         do {
2545                 if (do_phy_reset) {
2546                         err = tg3_bmcr_reset(tp);
2547                         if (err)
2548                                 return err;
2549                         do_phy_reset = 0;
2550                 }
2551
2552                 /* Disable transmitter and interrupt.  */
2553                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554                         continue;
2555
2556                 reg32 |= 0x3000;
2557                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558
2559                 /* Set full-duplex, 1000 mbps.  */
2560                 tg3_writephy(tp, MII_BMCR,
2561                              BMCR_FULLDPLX | BMCR_SPEED1000);
2562
2563                 /* Set to master mode.  */
2564                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565                         continue;
2566
2567                 tg3_writephy(tp, MII_CTRL1000,
2568                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569
2570                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571                 if (err)
2572                         return err;
2573
2574                 /* Block the PHY control access.  */
2575                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2576
2577                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578                 if (!err)
2579                         break;
2580         } while (--retries);
2581
2582         err = tg3_phy_reset_chanpat(tp);
2583         if (err)
2584                 return err;
2585
2586         tg3_phydsp_write(tp, 0x8005, 0x0000);
2587
2588         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590
2591         tg3_phy_toggle_auxctl_smdsp(tp, false);
2592
2593         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594
2595         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2596         if (err)
2597                 return err;
2598
2599         reg32 &= ~0x3000;
2600         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2601
2602         return 0;
2603 }
2604
2605 static void tg3_carrier_off(struct tg3 *tp)
2606 {
2607         netif_carrier_off(tp->dev);
2608         tp->link_up = false;
2609 }
2610
2611 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2612 {
2613         if (tg3_flag(tp, ENABLE_ASF))
2614                 netdev_warn(tp->dev,
2615                             "Management side-band traffic will be interrupted during phy settings change\n");
2616 }
2617
2618 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2619  * workarounds that must follow a PHY reset.
2620  */
2621 static int tg3_phy_reset(struct tg3 *tp)
2622 {
2623         u32 val, cpmuctrl;
2624         int err;
2625
2626         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2627                 val = tr32(GRC_MISC_CFG);
2628                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2629                 udelay(40);
2630         }
2631         err  = tg3_readphy(tp, MII_BMSR, &val);
2632         err |= tg3_readphy(tp, MII_BMSR, &val);
2633         if (err != 0)
2634                 return -EBUSY;
2635
2636         if (netif_running(tp->dev) && tp->link_up) {
2637                 netif_carrier_off(tp->dev);
2638                 tg3_link_report(tp);
2639         }
2640
2641         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2642             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2643             tg3_asic_rev(tp) == ASIC_REV_5705) {
2644                 err = tg3_phy_reset_5703_4_5(tp);
2645                 if (err)
2646                         return err;
2647                 goto out;
2648         }
2649
2650         cpmuctrl = 0;
2651         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2652             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2653                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2654                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2655                         tw32(TG3_CPMU_CTRL,
2656                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2657         }
2658
2659         err = tg3_bmcr_reset(tp);
2660         if (err)
2661                 return err;
2662
2663         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2664                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2665                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2666
2667                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2668         }
2669
2670         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2671             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2672                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2673                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2674                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2675                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2676                         udelay(40);
2677                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2678                 }
2679         }
2680
2681         if (tg3_flag(tp, 5717_PLUS) &&
2682             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2683                 return 0;
2684
2685         tg3_phy_apply_otp(tp);
2686
2687         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2688                 tg3_phy_toggle_apd(tp, true);
2689         else
2690                 tg3_phy_toggle_apd(tp, false);
2691
2692 out:
2693         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2694             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2695                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2696                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2697                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2698         }
2699
2700         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2701                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2702                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2703         }
2704
2705         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2706                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2708                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2709                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2710                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2711                 }
2712         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2713                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2715                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2716                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2717                                 tg3_writephy(tp, MII_TG3_TEST1,
2718                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2719                         } else
2720                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2721
2722                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2723                 }
2724         }
2725
2726         /* Set the extended packet length bit (bit 14) on all chips
2727          * that support jumbo frames. */
2728         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2729                 /* Cannot do read-modify-write on 5401 */
2730                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2731         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2732                 /* Set bit 14 with read-modify-write to preserve other bits */
2733                 err = tg3_phy_auxctl_read(tp,
2734                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2735                 if (!err)
2736                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2737                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2738         }
2739
2740         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2741          * jumbo frame transmission.
2742          */
2743         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2745                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2746                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2747         }
2748
2749         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2750                 /* adjust output voltage */
2751                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2752         }
2753
2754         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2755                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2756
2757         tg3_phy_toggle_automdix(tp, true);
2758         tg3_phy_set_wirespeed(tp);
2759         return 0;
2760 }
2761
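/* Each PCI function owns one 4-bit nibble of the GPIO message word,
 * which is why the DRVR_PRES and NEED_VAUX flags are replicated at
 * shifts 0, 4, 8 and 12 in the ALL_* masks below.
 */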
2762 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2763 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2764 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2765                                           TG3_GPIO_MSG_NEED_VAUX)
2766 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2767         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2768          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2769          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2770          (TG3_GPIO_MSG_DRVR_PRES << 12))
2771
2772 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2773         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2774          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2775          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2776          (TG3_GPIO_MSG_NEED_VAUX << 12))
2777
2778 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2779 {
2780         u32 status, shift;
2781
2782         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2783             tg3_asic_rev(tp) == ASIC_REV_5719)
2784                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2785         else
2786                 status = tr32(TG3_CPMU_DRV_STATUS);
2787
2788         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2789         status &= ~(TG3_GPIO_MSG_MASK << shift);
2790         status |= (newstat << shift);
2791
2792         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2793             tg3_asic_rev(tp) == ASIC_REV_5719)
2794                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2795         else
2796                 tw32(TG3_CPMU_DRV_STATUS, status);
2797
2798         return status >> TG3_APE_GPIO_MSG_SHIFT;
2799 }
2800
2801 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2802 {
2803         if (!tg3_flag(tp, IS_NIC))
2804                 return 0;
2805
2806         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2807             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2808             tg3_asic_rev(tp) == ASIC_REV_5720) {
2809                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2810                         return -EIO;
2811
2812                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2813
2814                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2815                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2818         } else {
2819                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2820                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2821         }
2822
2823         return 0;
2824 }
2825
2826 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2827 {
2828         u32 grc_local_ctrl;
2829
2830         if (!tg3_flag(tp, IS_NIC) ||
2831             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2832             tg3_asic_rev(tp) == ASIC_REV_5701)
2833                 return;
2834
2835         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2836
2837         tw32_wait_f(GRC_LOCAL_CTRL,
2838                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2839                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2840
2841         tw32_wait_f(GRC_LOCAL_CTRL,
2842                     grc_local_ctrl,
2843                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2844
2845         tw32_wait_f(GRC_LOCAL_CTRL,
2846                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 }
2849
2850 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2851 {
2852         if (!tg3_flag(tp, IS_NIC))
2853                 return;
2854
2855         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2856             tg3_asic_rev(tp) == ASIC_REV_5701) {
2857                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2858                             (GRC_LCLCTRL_GPIO_OE0 |
2859                              GRC_LCLCTRL_GPIO_OE1 |
2860                              GRC_LCLCTRL_GPIO_OE2 |
2861                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2862                              GRC_LCLCTRL_GPIO_OUTPUT1),
2863                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2864         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2865                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2866                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2867                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2868                                      GRC_LCLCTRL_GPIO_OE1 |
2869                                      GRC_LCLCTRL_GPIO_OE2 |
2870                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2871                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2872                                      tp->grc_local_ctrl;
2873                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2874                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2875
2876                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2877                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2878                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2879
2880                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2881                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883         } else {
2884                 u32 no_gpio2;
2885                 u32 grc_local_ctrl = 0;
2886
2887                 /* Workaround to prevent overdrawing Amps. */
2888                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2889                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2890                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2891                                     grc_local_ctrl,
2892                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2893                 }
2894
2895                 /* On 5753 and variants, GPIO2 cannot be used. */
2896                 no_gpio2 = tp->nic_sram_data_cfg &
2897                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2898
2899                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2900                                   GRC_LCLCTRL_GPIO_OE1 |
2901                                   GRC_LCLCTRL_GPIO_OE2 |
2902                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2903                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2904                 if (no_gpio2) {
2905                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2906                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2907                 }
2908                 tw32_wait_f(GRC_LOCAL_CTRL,
2909                             tp->grc_local_ctrl | grc_local_ctrl,
2910                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2911
2912                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2913
2914                 tw32_wait_f(GRC_LOCAL_CTRL,
2915                             tp->grc_local_ctrl | grc_local_ctrl,
2916                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2917
2918                 if (!no_gpio2) {
2919                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2920                         tw32_wait_f(GRC_LOCAL_CTRL,
2921                                     tp->grc_local_ctrl | grc_local_ctrl,
2922                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2923                 }
2924         }
2925 }
2926
2927 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2928 {
2929         u32 msg = 0;
2930
2931         /* Serialize power state transitions */
2932         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2933                 return;
2934
2935         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2936                 msg = TG3_GPIO_MSG_NEED_VAUX;
2937
2938         msg = tg3_set_function_status(tp, msg);
2939
2940         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2941                 goto done;
2942
2943         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2944                 tg3_pwrsrc_switch_to_vaux(tp);
2945         else
2946                 tg3_pwrsrc_die_with_vmain(tp);
2947
2948 done:
2949         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2950 }
2951
2952 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2953 {
2954         bool need_vaux = false;
2955
2956         /* The GPIOs do something completely different on 57765. */
2957         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2958                 return;
2959
2960         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2961             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2962             tg3_asic_rev(tp) == ASIC_REV_5720) {
2963                 tg3_frob_aux_power_5717(tp, include_wol ?
2964                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2965                 return;
2966         }
2967
2968         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2969                 struct net_device *dev_peer;
2970
2971                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2972
2973                 /* remove_one() may have been run on the peer. */
2974                 if (dev_peer) {
2975                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2976
2977                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2978                                 return;
2979
2980                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2981                             tg3_flag(tp_peer, ENABLE_ASF))
2982                                 need_vaux = true;
2983                 }
2984         }
2985
2986         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2987             tg3_flag(tp, ENABLE_ASF))
2988                 need_vaux = true;
2989
2990         if (need_vaux)
2991                 tg3_pwrsrc_switch_to_vaux(tp);
2992         else
2993                 tg3_pwrsrc_die_with_vmain(tp);
2994 }
2995
2996 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2997 {
2998         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2999                 return 1;
3000         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3001                 if (speed != SPEED_10)
3002                         return 1;
3003         } else if (speed == SPEED_10)
3004                 return 1;
3005
3006         return 0;
3007 }
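
/* Summary of the polarity rule above, as implemented (a reading of the
 * code, not of Broadcom documentation):
 *
 *   led_ctrl == LED_CTRL_MODE_PHY_2  ->  link polarity set at any speed
 *   PHY is a BCM5411                 ->  set at any speed except 10 Mbps
 *   any other PHY                    ->  set only at 10 Mbps
 */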
3008
3009 static bool tg3_phy_power_bug(struct tg3 *tp)
3010 {
3011         switch (tg3_asic_rev(tp)) {
3012         case ASIC_REV_5700:
3013         case ASIC_REV_5704:
3014                 return true;
3015         case ASIC_REV_5780:
3016                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3017                         return true;
3018                 return false;
3019         case ASIC_REV_5717:
3020                 if (!tp->pci_fn)
3021                         return true;
3022                 return false;
3023         case ASIC_REV_5719:
3024         case ASIC_REV_5720:
3025                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3026                     !tp->pci_fn)
3027                         return true;
3028                 return false;
3029         }
3030
3031         return false;
3032 }
3033
3034 static bool tg3_phy_led_bug(struct tg3 *tp)
3035 {
3036         switch (tg3_asic_rev(tp)) {
3037         case ASIC_REV_5719:
3038         case ASIC_REV_5720:
3039                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3040                     !tp->pci_fn)
3041                         return true;
3042                 return false;
3043         }
3044
3045         return false;
3046 }
3047
3048 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3049 {
3050         u32 val;
3051
3052         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3053                 return;
3054
3055         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3056                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3057                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3058                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3059
3060                         sg_dig_ctrl |=
3061                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3062                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3063                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3064                 }
3065                 return;
3066         }
3067
3068         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3069                 tg3_bmcr_reset(tp);
3070                 val = tr32(GRC_MISC_CFG);
3071                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3072                 udelay(40);
3073                 return;
3074         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3075                 u32 phytest;
3076                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3077                         u32 phy;
3078
3079                         tg3_writephy(tp, MII_ADVERTISE, 0);
3080                         tg3_writephy(tp, MII_BMCR,
3081                                      BMCR_ANENABLE | BMCR_ANRESTART);
3082
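                        /* MII_TG3_FET_SHDW_AUXMODE4 lives in a shadow
                         * register bank: setting MII_TG3_FET_SHADOW_EN in
                         * MII_TG3_FET_TEST exposes the shadow registers, and
                         * restoring the saved MII_TG3_FET_TEST value below
                         * hides them again.
                         */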
3083                         tg3_writephy(tp, MII_TG3_FET_TEST,
3084                                      phytest | MII_TG3_FET_SHADOW_EN);
3085                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3086                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3087                                 tg3_writephy(tp,
3088                                              MII_TG3_FET_SHDW_AUXMODE4,
3089                                              phy);
3090                         }
3091                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3092                 }
3093                 return;
3094         } else if (do_low_power) {
3095                 if (!tg3_phy_led_bug(tp))
3096                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3097                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3098
3099                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3100                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3101                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3102                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3103         }
3104
3105         /* On some chips the PHY must not be powered down because of
3106          * hardware bugs; tg3_phy_power_bug() identifies them.
3107          */
3108         if (tg3_phy_power_bug(tp))
3109                 return;
3110
3111         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3112             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3113                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3114                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3115                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3116                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3117         }
3118
3119         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3120 }
3121
3122 /* tp->lock is held. */
3123 static int tg3_nvram_lock(struct tg3 *tp)
3124 {
3125         if (tg3_flag(tp, NVRAM)) {
3126                 int i;
3127
3128                 if (tp->nvram_lock_cnt == 0) {
3129                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3130                         for (i = 0; i < 8000; i++) {
3131                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3132                                         break;
3133                                 udelay(20);
3134                         }
3135                         if (i == 8000) {
3136                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3137                                 return -ENODEV;
3138                         }
3139                 }
3140                 tp->nvram_lock_cnt++;
3141         }
3142         return 0;
3143 }
3144
3145 /* tp->lock is held. */
3146 static void tg3_nvram_unlock(struct tg3 *tp)
3147 {
3148         if (tg3_flag(tp, NVRAM)) {
3149                 if (tp->nvram_lock_cnt > 0)
3150                         tp->nvram_lock_cnt--;
3151                 if (tp->nvram_lock_cnt == 0)
3152                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3153         }
3154 }
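
/* A minimal sketch of the canonical bracket around NVRAM register access,
 * mirroring what tg3_nvram_read() below does (illustration only):
 *
 *      ret = tg3_nvram_lock(tp);
 *      if (ret)
 *              return ret;
 *      tg3_enable_nvram_access(tp);
 *      ...access NVRAM_ADDR / NVRAM_CMD / NVRAM_RDDATA...
 *      tg3_disable_nvram_access(tp);
 *      tg3_nvram_unlock(tp);
 */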
3155
3156 /* tp->lock is held. */
3157 static void tg3_enable_nvram_access(struct tg3 *tp)
3158 {
3159         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3160                 u32 nvaccess = tr32(NVRAM_ACCESS);
3161
3162                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3163         }
3164 }
3165
3166 /* tp->lock is held. */
3167 static void tg3_disable_nvram_access(struct tg3 *tp)
3168 {
3169         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3170                 u32 nvaccess = tr32(NVRAM_ACCESS);
3171
3172                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3173         }
3174 }
3175
3176 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3177                                         u32 offset, u32 *val)
3178 {
3179         u32 tmp;
3180         int i;
3181
3182         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3183                 return -EINVAL;
3184
3185         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3186                                         EEPROM_ADDR_DEVID_MASK |
3187                                         EEPROM_ADDR_READ);
3188         tw32(GRC_EEPROM_ADDR,
3189              tmp |
3190              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3191              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3192               EEPROM_ADDR_ADDR_MASK) |
3193              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3194
3195         for (i = 0; i < 1000; i++) {
3196                 tmp = tr32(GRC_EEPROM_ADDR);
3197
3198                 if (tmp & EEPROM_ADDR_COMPLETE)
3199                         break;
3200                 msleep(1);
3201         }
3202         if (!(tmp & EEPROM_ADDR_COMPLETE))
3203                 return -EBUSY;
3204
3205         tmp = tr32(GRC_EEPROM_DATA);
3206
3207         /*
3208          * The data read back is always byteswapped relative to the
3209          * native endian format.  Perform a blind byteswap to compensate.
3210          */
3211         *val = swab32(tmp);
3212
3213         return 0;
3214 }
3215
3216 #define NVRAM_CMD_TIMEOUT 5000
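
/* With usleep_range(10, 40) per iteration, NVRAM_CMD_TIMEOUT bounds the
 * polling loop in tg3_nvram_exec_cmd() below to roughly 50-200 ms.
 */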
3217
3218 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3219 {
3220         int i;
3221
3222         tw32(NVRAM_CMD, nvram_cmd);
3223         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3224                 usleep_range(10, 40);
3225                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3226                         udelay(10);
3227                         break;
3228                 }
3229         }
3230
3231         if (i == NVRAM_CMD_TIMEOUT)
3232                 return -EBUSY;
3233
3234         return 0;
3235 }
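
/* As an example of command composition, the single-word read in
 * tg3_nvram_read() below issues:
 *
 *      NVRAM_CMD_RD | NVRAM_CMD_GO | NVRAM_CMD_FIRST |
 *      NVRAM_CMD_LAST | NVRAM_CMD_DONE
 *
 * i.e. a read that is both the first and the last word of its burst.
 */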
3236
3237 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3238 {
3239         if (tg3_flag(tp, NVRAM) &&
3240             tg3_flag(tp, NVRAM_BUFFERED) &&
3241             tg3_flag(tp, FLASH) &&
3242             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3243             (tp->nvram_jedecnum == JEDEC_ATMEL))
3244
3245                 addr = ((addr / tp->nvram_pagesize) <<
3246                         ATMEL_AT45DB0X1B_PAGE_POS) +
3247                        (addr % tp->nvram_pagesize);
3248
3249         return addr;
3250 }
3251
3252 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3253 {
3254         if (tg3_flag(tp, NVRAM) &&
3255             tg3_flag(tp, NVRAM_BUFFERED) &&
3256             tg3_flag(tp, FLASH) &&
3257             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3258             (tp->nvram_jedecnum == JEDEC_ATMEL))
3259
3260                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3261                         tp->nvram_pagesize) +
3262                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3263
3264         return addr;
3265 }
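
/* Worked example of the Atmel translation above (a sketch assuming
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 and a 264-byte page, per tg3.h):
 *
 *      linear addr 1000 -> page 1000 / 264 = 3, offset 1000 % 264 = 208
 *      phys addr = (3 << 9) + 208 = 0x6d0
 *
 * tg3_nvram_logical_addr() inverts it:
 *      (0x6d0 >> 9) * 264 + (0x6d0 & 511) = 3 * 264 + 208 = 1000
 */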
3266
3267 /* NOTE: Data read in from NVRAM is byteswapped according to
3268  * the byteswapping settings for all other register accesses.
3269  * tg3 devices are BE devices, so on a BE machine, the data
3270  * returned will be exactly as it is seen in NVRAM.  On a LE
3271  * machine, the 32-bit value will be byteswapped.
3272  */
3273 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3274 {
3275         int ret;
3276
3277         if (!tg3_flag(tp, NVRAM))
3278                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3279
3280         offset = tg3_nvram_phys_addr(tp, offset);
3281
3282         if (offset > NVRAM_ADDR_MSK)
3283                 return -EINVAL;
3284
3285         ret = tg3_nvram_lock(tp);
3286         if (ret)
3287                 return ret;
3288
3289         tg3_enable_nvram_access(tp);
3290
3291         tw32(NVRAM_ADDR, offset);
3292         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3293                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3294
3295         if (ret == 0)
3296                 *val = tr32(NVRAM_RDDATA);
3297
3298         tg3_disable_nvram_access(tp);
3299
3300         tg3_nvram_unlock(tp);
3301
3302         return ret;
3303 }
3304
3305 /* Ensures NVRAM data is in bytestream format. */
3306 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3307 {
3308         u32 v;
3309         int res = tg3_nvram_read(tp, offset, &v);
3310         if (!res)
3311                 *val = cpu_to_be32(v);
3312         return res;
3313 }
3314
3315 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3316                                     u32 offset, u32 len, u8 *buf)
3317 {
3318         int i, j, rc = 0;
3319         u32 val;
3320
3321         for (i = 0; i < len; i += 4) {
3322                 u32 addr;
3323                 __be32 data;
3324
3325                 addr = offset + i;
3326
3327                 memcpy(&data, buf + i, 4);
3328
3329                 /*
3330                  * The SEEPROM interface expects the data to always be opposite
3331                  * the native endian format.  We accomplish this by reversing
3332                  * all the operations that would have been performed on the
3333                  * data from a call to tg3_nvram_read_be32().
3334                  */
3335                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3336
3337                 val = tr32(GRC_EEPROM_ADDR);
3338                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3339
3340                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3341                         EEPROM_ADDR_READ);
3342                 tw32(GRC_EEPROM_ADDR, val |
3343                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3344                         (addr & EEPROM_ADDR_ADDR_MASK) |
3345                         EEPROM_ADDR_START |
3346                         EEPROM_ADDR_WRITE);
3347
3348                 for (j = 0; j < 1000; j++) {
3349                         val = tr32(GRC_EEPROM_ADDR);
3350
3351                         if (val & EEPROM_ADDR_COMPLETE)
3352                                 break;
3353                         msleep(1);
3354                 }
3355                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3356                         rc = -EBUSY;
3357                         break;
3358                 }
3359         }
3360
3361         return rc;
3362 }
3363
3364 /* offset and length are dword aligned */
3365 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3366                 u8 *buf)
3367 {
3368         int ret = 0;
3369         u32 pagesize = tp->nvram_pagesize;
3370         u32 pagemask = pagesize - 1;
3371         u32 nvram_cmd;
3372         u8 *tmp;
3373
3374         tmp = kmalloc(pagesize, GFP_KERNEL);
3375         if (tmp == NULL)
3376                 return -ENOMEM;
3377
3378         while (len) {
3379                 int j;
3380                 u32 phy_addr, page_off, size;
3381
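                /* Page math sketch, assuming the power-of-two pagesize used
                 * on the unbuffered flash path (e.g. pagesize == 256):
                 * offset 0x10c gives phy_addr = 0x100 (page start) and
                 * page_off = 0x0c (offset within that page).
                 */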
3382                 phy_addr = offset & ~pagemask;
3383
3384                 for (j = 0; j < pagesize; j += 4) {
3385                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3386                                                   (__be32 *) (tmp + j));
3387                         if (ret)
3388                                 break;
3389                 }
3390                 if (ret)
3391                         break;
3392
3393                 page_off = offset & pagemask;
3394                 size = pagesize;
3395                 if (len < size)
3396                         size = len;
3397
3398                 len -= size;
3399
3400                 memcpy(tmp + page_off, buf, size);
3401
3402                 offset = offset + (pagesize - page_off);
3403
3404                 tg3_enable_nvram_access(tp);
3405
3406                 /*
3407                  * Before we can erase the flash page, we need
3408                  * to issue a special "write enable" command.
3409                  */
3410                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3411
3412                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3413                         break;
3414
3415                 /* Erase the target page */
3416                 tw32(NVRAM_ADDR, phy_addr);
3417
3418                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3419                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3420
3421                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422                         break;
3423
3424                 /* Issue another write enable to start the write. */
3425                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3426
3427                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428                         break;
3429
3430                 for (j = 0; j < pagesize; j += 4) {
3431                         __be32 data;
3432
3433                         data = *((__be32 *) (tmp + j));
3434
3435                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3436
3437                         tw32(NVRAM_ADDR, phy_addr + j);
3438
3439                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3440                                 NVRAM_CMD_WR;
3441
3442                         if (j == 0)
3443                                 nvram_cmd |= NVRAM_CMD_FIRST;
3444                         else if (j == (pagesize - 4))
3445                                 nvram_cmd |= NVRAM_CMD_LAST;
3446
3447                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3448                         if (ret)
3449                                 break;
3450                 }
3451                 if (ret)
3452                         break;
3453         }
3454
3455         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3456         tg3_nvram_exec_cmd(tp, nvram_cmd);
3457
3458         kfree(tmp);
3459
3460         return ret;
3461 }
3462
3463 /* offset and length are dword aligned */
3464 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3465                 u8 *buf)
3466 {
3467         int i, ret = 0;
3468
3469         for (i = 0; i < len; i += 4, offset += 4) {
3470                 u32 page_off, phy_addr, nvram_cmd;
3471                 __be32 data;
3472
3473                 memcpy(&data, buf + i, 4);
3474                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3475
3476                 page_off = offset % tp->nvram_pagesize;
3477
3478                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3479
3480                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3481
3482                 if (page_off == 0 || i == 0)
3483                         nvram_cmd |= NVRAM_CMD_FIRST;
3484                 if (page_off == (tp->nvram_pagesize - 4))
3485                         nvram_cmd |= NVRAM_CMD_LAST;
3486
3487                 if (i == (len - 4))
3488                         nvram_cmd |= NVRAM_CMD_LAST;
3489
3490                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3491                     !tg3_flag(tp, FLASH) ||
3492                     !tg3_flag(tp, 57765_PLUS))
3493                         tw32(NVRAM_ADDR, phy_addr);
3494
3495                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3496                     !tg3_flag(tp, 5755_PLUS) &&
3497                     (tp->nvram_jedecnum == JEDEC_ST) &&
3498                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3499                         u32 cmd;
3500
3501                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3502                         ret = tg3_nvram_exec_cmd(tp, cmd);
3503                         if (ret)
3504                                 break;
3505                 }
3506                 if (!tg3_flag(tp, FLASH)) {
3507                         /* We always do complete word writes to the EEPROM. */
3508                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3509                 }
3510
3511                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3512                 if (ret)
3513                         break;
3514         }
3515         return ret;
3516 }
3517
3518 /* offset and length are dword aligned */
3519 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3520 {
3521         int ret;
3522
3523         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3524                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3525                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3526                 udelay(40);
3527         }
3528
3529         if (!tg3_flag(tp, NVRAM)) {
3530                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3531         } else {
3532                 u32 grc_mode;
3533
3534                 ret = tg3_nvram_lock(tp);
3535                 if (ret)
3536                         return ret;
3537
3538                 tg3_enable_nvram_access(tp);
3539                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3540                         tw32(NVRAM_WRITE1, 0x406);
3541
3542                 grc_mode = tr32(GRC_MODE);
3543                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3544
3545                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3546                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3547                                 buf);
3548                 } else {
3549                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3550                                 buf);
3551                 }
3552
3553                 grc_mode = tr32(GRC_MODE);
3554                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3555
3556                 tg3_disable_nvram_access(tp);
3557                 tg3_nvram_unlock(tp);
3558         }
3559
3560         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3561                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3562                 udelay(40);
3563         }
3564
3565         return ret;
3566 }
3567
3568 #define RX_CPU_SCRATCH_BASE     0x30000
3569 #define RX_CPU_SCRATCH_SIZE     0x04000
3570 #define TX_CPU_SCRATCH_BASE     0x34000
3571 #define TX_CPU_SCRATCH_SIZE     0x04000
3572
3573 /* tp->lock is held. */
3574 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3575 {
3576         int i;
3577         const int iters = 10000;
3578
3579         for (i = 0; i < iters; i++) {
3580                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3581                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3582                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3583                         break;
3584                 if (pci_channel_offline(tp->pdev))
3585                         return -EBUSY;
3586         }
3587
3588         return (i == iters) ? -EBUSY : 0;
3589 }
3590
3591 /* tp->lock is held. */
3592 static int tg3_rxcpu_pause(struct tg3 *tp)
3593 {
3594         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3595
3596         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3597         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3598         udelay(10);
3599
3600         return rc;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_txcpu_pause(struct tg3 *tp)
3605 {
3606         return tg3_pause_cpu(tp, TX_CPU_BASE);
3607 }
3608
3609 /* tp->lock is held. */
3610 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3611 {
3612         tw32(cpu_base + CPU_STATE, 0xffffffff);
3613         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_rxcpu_resume(struct tg3 *tp)
3618 {
3619         tg3_resume_cpu(tp, RX_CPU_BASE);
3620 }
3621
3622 /* tp->lock is held. */
3623 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625         int rc;
3626
3627         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3628
3629         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3630                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3631
3632                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3633                 return 0;
3634         }
3635         if (cpu_base == RX_CPU_BASE) {
3636                 rc = tg3_rxcpu_pause(tp);
3637         } else {
3638                 /*
3639                  * The 5750 derivative embedded in the BCM4785 has only an
3640                  * Rx CPU, so there is no Tx CPU to pause here.
3641                  */
3642                 if (tg3_flag(tp, IS_SSB_CORE))
3643                         return 0;
3644
3645                 rc = tg3_txcpu_pause(tp);
3646         }
3647
3648         if (rc) {
3649                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3650                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3651                 return -ENODEV;
3652         }
3653
3654         /* Clear firmware's nvram arbitration. */
3655         if (tg3_flag(tp, NVRAM))
3656                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3657         return 0;
3658 }
3659
3660 static int tg3_fw_data_len(struct tg3 *tp,
3661                            const struct tg3_firmware_hdr *fw_hdr)
3662 {
3663         int fw_len;
3664
3665         /* Non-fragmented firmware has one firmware header followed by a
3666          * contiguous chunk of data to be written. The length field in that
3667          * header is not the length of the data to be written but the
3668          * complete length of the bss. The data length is instead derived
3669          * from tp->fw->size minus the header size.
3670          *
3671          * Fragmented firmware has a main header followed by multiple
3672          * fragments. Each fragment is identical to non-fragmented firmware:
3673          * a firmware header followed by a contiguous chunk of data. In
3674          * the main header, the length field is unused and set to 0xffffffff.
3675          * In each fragment header, the length field covers the entire
3676          * fragment, i.e. fragment data plus header. The data length is
3677          * therefore the header's length field minus TG3_FW_HDR_LEN.
3678          */
3679         if (tp->fw_len == 0xffffffff)
3680                 fw_len = be32_to_cpu(fw_hdr->len);
3681         else
3682                 fw_len = tp->fw->size;
3683
3684         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3685 }
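
/* Worked example with hypothetical sizes, assuming TG3_FW_HDR_LEN == 12
 * (three u32 header fields): a non-fragmented blob with tp->fw->size ==
 * 0x1000 carries (0x1000 - 12) / 4 == 1021 data words, while a fragment
 * whose header length field reads 0x20c carries (0x20c - 12) / 4 == 128
 * data words.
 */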
3686
3687 /* tp->lock is held. */
3688 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3689                                  u32 cpu_scratch_base, int cpu_scratch_size,
3690                                  const struct tg3_firmware_hdr *fw_hdr)
3691 {
3692         int err, i;
3693         void (*write_op)(struct tg3 *, u32, u32);
3694         int total_len = tp->fw->size;
3695
3696         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3697                 netdev_err(tp->dev,
3698                            "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3699                            __func__);
3700                 return -EINVAL;
3701         }
3702
3703         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3704                 write_op = tg3_write_mem;
3705         else
3706                 write_op = tg3_write_indirect_reg32;
3707
3708         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3709                 /* It is possible that bootcode is still loading at this point.
3710                  * Get the nvram lock first before halting the cpu.
3711                  */
3712                 int lock_err = tg3_nvram_lock(tp);
3713                 err = tg3_halt_cpu(tp, cpu_base);
3714                 if (!lock_err)
3715                         tg3_nvram_unlock(tp);
3716                 if (err)
3717                         goto out;
3718
3719                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3720                         write_op(tp, cpu_scratch_base + i, 0);
3721                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3722                 tw32(cpu_base + CPU_MODE,
3723                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3724         } else {
3725                 /* Subtract additional main header for fragmented firmware and
3726                  * advance to the first fragment
3727                  */
3728                 total_len -= TG3_FW_HDR_LEN;
3729                 fw_hdr++;
3730         }
3731
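        /* Illustrative blob layout for the fragmented case (not to scale):
         *
         *   [main hdr, len = 0xffffffff]
         *   [frag hdr, len = L1][L1 - TG3_FW_HDR_LEN bytes of data]
         *   [frag hdr, len = L2][L2 - TG3_FW_HDR_LEN bytes of data] ...
         *
         * Each pass below writes one fragment's data words and advances
         * fw_hdr by that fragment's len field.
         */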
3732         do {
3733                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3734                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3735                         write_op(tp, cpu_scratch_base +
3736                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3737                                      (i * sizeof(u32)),
3738                                  be32_to_cpu(fw_data[i]));
3739
3740                 total_len -= be32_to_cpu(fw_hdr->len);
3741
3742                 /* Advance to next fragment */
3743                 fw_hdr = (struct tg3_firmware_hdr *)
3744                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3745         } while (total_len > 0);
3746
3747         err = 0;
3748
3749 out:
3750         return err;
3751 }
3752
3753 /* tp->lock is held. */
3754 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3755 {
3756         int i;
3757         const int iters = 5;
3758
3759         tw32(cpu_base + CPU_STATE, 0xffffffff);
3760         tw32_f(cpu_base + CPU_PC, pc);
3761
3762         for (i = 0; i < iters; i++) {
3763                 if (tr32(cpu_base + CPU_PC) == pc)
3764                         break;
3765                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3766                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3767                 tw32_f(cpu_base + CPU_PC, pc);
3768                 udelay(1000);
3769         }
3770
3771         return (i == iters) ? -EBUSY : 0;
3772 }
3773
3774 /* tp->lock is held. */
3775 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3776 {
3777         const struct tg3_firmware_hdr *fw_hdr;
3778         int err;
3779
3780         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3781
3782         /* The firmware blob starts with version numbers, followed by the
3783          * start address and length.  The length field holds the complete
3784          * length: length = end_address_of_bss - start_address_of_text.
3785          * The remainder is the blob to be loaded contiguously from the
3786          * start address. */
3787
3788         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3789                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3790                                     fw_hdr);
3791         if (err)
3792                 return err;
3793
3794         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3795                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3796                                     fw_hdr);
3797         if (err)
3798                 return err;
3799
3800         /* Now startup only the RX cpu. */
3801         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3802                                        be32_to_cpu(fw_hdr->base_addr));
3803         if (err) {
3804                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3805                            "should be %08x\n", __func__,
3806                            tr32(RX_CPU_BASE + CPU_PC),
3807                            be32_to_cpu(fw_hdr->base_addr));
3808                 return -ENODEV;
3809         }
3810
3811         tg3_rxcpu_resume(tp);
3812
3813         return 0;
3814 }
3815
3816 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3817 {
3818         const int iters = 1000;
3819         int i;
3820         u32 val;
3821
3822         /* Wait for the boot code to complete initialization and enter its
3823          * service loop. It is then safe to download service patches.
3824          */
3825         for (i = 0; i < iters; i++) {
3826                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3827                         break;
3828
3829                 udelay(10);
3830         }
3831
3832         if (i == iters) {
3833                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3834                 return -EBUSY;
3835         }
3836
3837         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3838         if (val & 0xff) {
3839                 netdev_warn(tp->dev,
3840                             "Other patches exist. Not downloading EEE patch\n");
3841                 return -EEXIST;
3842         }
3843
3844         return 0;
3845 }
3846
3847 /* tp->lock is held. */
3848 static void tg3_load_57766_firmware(struct tg3 *tp)
3849 {
3850         struct tg3_firmware_hdr *fw_hdr;
3851
3852         if (!tg3_flag(tp, NO_NVRAM))
3853                 return;
3854
3855         if (tg3_validate_rxcpu_state(tp))
3856                 return;
3857
3858         if (!tp->fw)
3859                 return;
3860
3861         /* This firmware blob has a different format than older firmware
3862          * releases, as described below. The main difference is that the
3863          * data is fragmented and written to non-contiguous locations.
3864          *
3865          * The blob begins with a firmware header identical to other
3866          * firmware, consisting of version, base addr and length. The length
3867          * here is unused and set to 0xffffffff.
3868          *
3869          * This is followed by a series of firmware fragments, each
3870          * individually identical to the older firmware format, i.e. a
3871          * firmware header followed by the data for that fragment. The
3872          * version field of each fragment header is unused.
3873          */
3874
3875         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3876         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3877                 return;
3878
3879         if (tg3_rxcpu_pause(tp))
3880                 return;
3881
3882         /* tg3_load_firmware_cpu() will always succeed for the 57766. */
3883         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3884
3885         tg3_rxcpu_resume(tp);
3886 }
3887
3888 /* tp->lock is held. */
3889 static int tg3_load_tso_firmware(struct tg3 *tp)
3890 {
3891         const struct tg3_firmware_hdr *fw_hdr;
3892         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3893         int err;
3894
3895         if (!tg3_flag(tp, FW_TSO))
3896                 return 0;
3897
3898         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3899
3900         /* The firmware blob starts with version numbers, followed by the
3901          * start address and length.  The length field holds the complete
3902          * length: length = end_address_of_bss - start_address_of_text.
3903          * The remainder is the blob to be loaded contiguously from the
3904          * start address. */
3905
3906         cpu_scratch_size = tp->fw_len;
3907
3908         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3909                 cpu_base = RX_CPU_BASE;
3910                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3911         } else {
3912                 cpu_base = TX_CPU_BASE;
3913                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3914                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3915         }
3916
3917         err = tg3_load_firmware_cpu(tp, cpu_base,
3918                                     cpu_scratch_base, cpu_scratch_size,
3919                                     fw_hdr);
3920         if (err)
3921                 return err;
3922
3923         /* Now startup the cpu. */
3924         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3925                                        be32_to_cpu(fw_hdr->base_addr));
3926         if (err) {
3927                 netdev_err(tp->dev,
3928                            "%s fails to set CPU PC, is %08x should be %08x\n",
3929                            __func__, tr32(cpu_base + CPU_PC),
3930                            be32_to_cpu(fw_hdr->base_addr));
3931                 return -ENODEV;
3932         }
3933
3934         tg3_resume_cpu(tp, cpu_base);
3935         return 0;
3936 }
3937
3938 /* tp->lock is held. */
3939 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3940 {
3941         u32 addr_high, addr_low;
3942
3943         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3944         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3945                     (mac_addr[4] <<  8) | mac_addr[5]);
3946
3947         if (index < 4) {
3948                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3949                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3950         } else {
3951                 index -= 4;
3952                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3953                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3954         }
3955 }
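
/* Register packing sketch for a hypothetical MAC address 00:10:18:aa:bb:cc:
 *
 *      addr_high = 0x00000010   (bytes 0-1)
 *      addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * Indices 0-3 land in the MAC_ADDR_{0..3}_{HIGH,LOW} registers; index 4 and
 * above fall into the extended MAC_EXTADDR range.
 */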
3956
3957 /* tp->lock is held. */
3958 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3959 {
3960         u32 addr_high;
3961         int i;
3962
3963         for (i = 0; i < 4; i++) {
3964                 if (i == 1 && skip_mac_1)
3965                         continue;
3966                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3967         }
3968
3969         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3970             tg3_asic_rev(tp) == ASIC_REV_5704) {
3971                 for (i = 4; i < 16; i++)
3972                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3973         }
3974
3975         addr_high = (tp->dev->dev_addr[0] +
3976                      tp->dev->dev_addr[1] +
3977                      tp->dev->dev_addr[2] +
3978                      tp->dev->dev_addr[3] +
3979                      tp->dev->dev_addr[4] +
3980                      tp->dev->dev_addr[5]) &
3981                 TX_BACKOFF_SEED_MASK;
3982         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3983 }
3984
3985 static void tg3_enable_register_access(struct tg3 *tp)
3986 {
3987         /*
3988          * Make sure register accesses (indirect or otherwise) will function
3989          * correctly.
3990          */
3991         pci_write_config_dword(tp->pdev,
3992                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3993 }
3994
3995 static int tg3_power_up(struct tg3 *tp)
3996 {
3997         int err;
3998
3999         tg3_enable_register_access(tp);
4000
4001         err = pci_set_power_state(tp->pdev, PCI_D0);
4002         if (!err) {
4003                 /* Switch out of Vaux if it is a NIC */
4004                 tg3_pwrsrc_switch_to_vmain(tp);
4005         } else {
4006                 netdev_err(tp->dev, "Transition to D0 failed\n");
4007         }
4008
4009         return err;
4010 }
4011
4012 static int tg3_setup_phy(struct tg3 *, bool);
4013
4014 static int tg3_power_down_prepare(struct tg3 *tp)
4015 {
4016         u32 misc_host_ctrl;
4017         bool device_should_wake, do_low_power;
4018
4019         tg3_enable_register_access(tp);
4020
4021         /* Restore the CLKREQ setting. */
4022         if (tg3_flag(tp, CLKREQ_BUG))
4023                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4024                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4025
4026         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4027         tw32(TG3PCI_MISC_HOST_CTRL,
4028              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4029
4030         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4031                              tg3_flag(tp, WOL_ENABLE);
4032
4033         if (tg3_flag(tp, USE_PHYLIB)) {
4034                 do_low_power = false;
4035                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4036                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4037                         struct phy_device *phydev;
4038                         u32 phyid, advertising;
4039
4040                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4041
4042                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4043
4044                         tp->link_config.speed = phydev->speed;
4045                         tp->link_config.duplex = phydev->duplex;
4046                         tp->link_config.autoneg = phydev->autoneg;
4047                         tp->link_config.advertising = phydev->advertising;
4048
4049                         advertising = ADVERTISED_TP |
4050                                       ADVERTISED_Pause |
4051                                       ADVERTISED_Autoneg |
4052                                       ADVERTISED_10baseT_Half;
4053
4054                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4055                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4056                                         advertising |=
4057                                                 ADVERTISED_100baseT_Half |
4058                                                 ADVERTISED_100baseT_Full |
4059                                                 ADVERTISED_10baseT_Full;
4060                                 else
4061                                         advertising |= ADVERTISED_10baseT_Full;
4062                         }
4063
4064                         phydev->advertising = advertising;
4065
4066                         phy_start_aneg(phydev);
4067
4068                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4069                         if (phyid != PHY_ID_BCMAC131) {
4070                                 phyid &= PHY_BCM_OUI_MASK;
4071                                 if (phyid == PHY_BCM_OUI_1 ||
4072                                     phyid == PHY_BCM_OUI_2 ||
4073                                     phyid == PHY_BCM_OUI_3)
4074                                         do_low_power = true;
4075                         }
4076                 }
4077         } else {
4078                 do_low_power = true;
4079
4080                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4081                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4082
4083                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4084                         tg3_setup_phy(tp, false);
4085         }
4086
4087         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4088                 u32 val;
4089
4090                 val = tr32(GRC_VCPU_EXT_CTRL);
4091                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4092         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4093                 int i;
4094                 u32 val;
4095
4096                 for (i = 0; i < 200; i++) {
4097                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4098                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4099                                 break;
4100                         msleep(1);
4101                 }
4102         }
4103         if (tg3_flag(tp, WOL_CAP))
4104                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4105                                                      WOL_DRV_STATE_SHUTDOWN |
4106                                                      WOL_DRV_WOL |
4107                                                      WOL_SET_MAGIC_PKT);
4108
4109         if (device_should_wake) {
4110                 u32 mac_mode;
4111
4112                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4113                         if (do_low_power &&
4114                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4115                                 tg3_phy_auxctl_write(tp,
4116                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4117                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4118                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4119                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4120                                 udelay(40);
4121                         }
4122
4123                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4124                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4125                         else if (tp->phy_flags &
4126                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4127                                 if (tp->link_config.active_speed == SPEED_1000)
4128                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4129                                 else
4130                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4131                         } else
4132                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4133
4134                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4135                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4136                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4137                                              SPEED_100 : SPEED_10;
4138                                 if (tg3_5700_link_polarity(tp, speed))
4139                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4140                                 else
4141                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4142                         }
4143                 } else {
4144                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4145                 }
4146
4147                 if (!tg3_flag(tp, 5750_PLUS))
4148                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4149
4150                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4151                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4152                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4153                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4154
4155                 if (tg3_flag(tp, ENABLE_APE))
4156                         mac_mode |= MAC_MODE_APE_TX_EN |
4157                                     MAC_MODE_APE_RX_EN |
4158                                     MAC_MODE_TDE_ENABLE;
4159
4160                 tw32_f(MAC_MODE, mac_mode);
4161                 udelay(100);
4162
4163                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4164                 udelay(10);
4165         }
4166
4167         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4168             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4169              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4170                 u32 base_val;
4171
4172                 base_val = tp->pci_clock_ctrl;
4173                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4174                              CLOCK_CTRL_TXCLK_DISABLE);
4175
4176                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4177                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4178         } else if (tg3_flag(tp, 5780_CLASS) ||
4179                    tg3_flag(tp, CPMU_PRESENT) ||
4180                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4181                 /* do nothing */
4182         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4183                 u32 newbits1, newbits2;
4184
4185                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4187                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4188                                     CLOCK_CTRL_TXCLK_DISABLE |
4189                                     CLOCK_CTRL_ALTCLK);
4190                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4191                 } else if (tg3_flag(tp, 5705_PLUS)) {
4192                         newbits1 = CLOCK_CTRL_625_CORE;
4193                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4194                 } else {
4195                         newbits1 = CLOCK_CTRL_ALTCLK;
4196                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4197                 }
4198
4199                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4200                             40);
4201
4202                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4203                             40);
4204
4205                 if (!tg3_flag(tp, 5705_PLUS)) {
4206                         u32 newbits3;
4207
4208                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4209                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4210                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4211                                             CLOCK_CTRL_TXCLK_DISABLE |
4212                                             CLOCK_CTRL_44MHZ_CORE);
4213                         } else {
4214                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4215                         }
4216
4217                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4218                                     tp->pci_clock_ctrl | newbits3, 40);
4219                 }
4220         }
4221
4222         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4223                 tg3_power_down_phy(tp, do_low_power);
4224
4225         tg3_frob_aux_power(tp, true);
4226
4227         /* Workaround for unstable PLL clock */
4228         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4229             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4230              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4231                 u32 val = tr32(0x7d00);
4232
4233                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4234                 tw32(0x7d00, val);
4235                 if (!tg3_flag(tp, ENABLE_ASF)) {
4236                         int err;
4237
4238                         err = tg3_nvram_lock(tp);
4239                         tg3_halt_cpu(tp, RX_CPU_BASE);
4240                         if (!err)
4241                                 tg3_nvram_unlock(tp);
4242                 }
4243         }
4244
4245         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4246
4247         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4248
4249         return 0;
4250 }
4251
4252 static void tg3_power_down(struct tg3 *tp)
4253 {
4254         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4255         pci_set_power_state(tp->pdev, PCI_D3hot);
4256 }
4257
4258 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4259 {
4260         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4261         case MII_TG3_AUX_STAT_10HALF:
4262                 *speed = SPEED_10;
4263                 *duplex = DUPLEX_HALF;
4264                 break;
4265
4266         case MII_TG3_AUX_STAT_10FULL:
4267                 *speed = SPEED_10;
4268                 *duplex = DUPLEX_FULL;
4269                 break;
4270
4271         case MII_TG3_AUX_STAT_100HALF:
4272                 *speed = SPEED_100;
4273                 *duplex = DUPLEX_HALF;
4274                 break;
4275
4276         case MII_TG3_AUX_STAT_100FULL:
4277                 *speed = SPEED_100;
4278                 *duplex = DUPLEX_FULL;
4279                 break;
4280
4281         case MII_TG3_AUX_STAT_1000HALF:
4282                 *speed = SPEED_1000;
4283                 *duplex = DUPLEX_HALF;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_1000FULL:
4287                 *speed = SPEED_1000;
4288                 *duplex = DUPLEX_FULL;
4289                 break;
4290
4291         default:
4292                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4293                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4294                                  SPEED_10;
4295                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4296                                   DUPLEX_HALF;
4297                         break;
4298                 }
4299                 *speed = SPEED_UNKNOWN;
4300                 *duplex = DUPLEX_UNKNOWN;
4301                 break;
4302         }
4303 }
4304
4305 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4306 {
4307         int err = 0;
4308         u32 val, new_adv;
4309
4310         new_adv = ADVERTISE_CSMA;
4311         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4312         new_adv |= mii_advertise_flowctrl(flowctrl);
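        /* For instance, advertise == (ADVERTISED_10baseT_Full |
         * ADVERTISED_100baseT_Full) with flowctrl == (FLOW_CTRL_TX |
         * FLOW_CTRL_RX) should yield new_adv == ADVERTISE_CSMA |
         * ADVERTISE_10FULL | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP,
         * since symmetric pause advertises PAUSE_CAP without PAUSE_ASYM.
         */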
4313
4314         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4315         if (err)
4316                 goto done;
4317
4318         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4319                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4320
4321                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4322                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4323                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4324
4325                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4326                 if (err)
4327                         goto done;
4328         }
4329
4330         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4331                 goto done;
4332
4333         tw32(TG3_CPMU_EEE_MODE,
4334              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4335
4336         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4337         if (!err) {
4338                 u32 err2;
4339
4340                 val = 0;
4341                 /* Advertise 100-BaseTX EEE ability */
4342                 if (advertise & ADVERTISED_100baseT_Full)
4343                         val |= MDIO_AN_EEE_ADV_100TX;
4344                 /* Advertise 1000-BaseT EEE ability */
4345                 if (advertise & ADVERTISED_1000baseT_Full)
4346                         val |= MDIO_AN_EEE_ADV_1000T;
4347
4348                 if (!tp->eee.eee_enabled) {
4349                         val = 0;
4350                         tp->eee.advertised = 0;
4351                 } else {
4352                         tp->eee.advertised = advertise &
4353                                              (ADVERTISED_100baseT_Full |
4354                                               ADVERTISED_1000baseT_Full);
4355                 }
4356
4357                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4358                 if (err)
4359                         val = 0;
4360
4361                 switch (tg3_asic_rev(tp)) {
4362                 case ASIC_REV_5717:
4363                 case ASIC_REV_57765:
4364                 case ASIC_REV_57766:
4365                 case ASIC_REV_5719:
4366                         /* If we advertised any EEE abilities above... */
4367                         if (val)
4368                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4369                                       MII_TG3_DSP_TAP26_RMRXSTO |
4370                                       MII_TG3_DSP_TAP26_OPCSINPT;
4371                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4372                         /* Fall through */
4373                 case ASIC_REV_5720:
4374                 case ASIC_REV_5762:
4375                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4376                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4377                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4378                 }
4379
4380                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4381                 if (!err)
4382                         err = err2;
4383         }
4384
4385 done:
4386         return err;
4387 }
4388
4389 static void tg3_phy_copper_begin(struct tg3 *tp)
4390 {
4391         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4392             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4393                 u32 adv, fc;
4394
4395                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4396                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4397                         adv = ADVERTISED_10baseT_Half |
4398                               ADVERTISED_10baseT_Full;
4399                         if (tg3_flag(tp, WOL_SPEED_100MB))
4400                                 adv |= ADVERTISED_100baseT_Half |
4401                                        ADVERTISED_100baseT_Full;
4402                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4403                                 if (!(tp->phy_flags &
4404                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4405                                         adv |= ADVERTISED_1000baseT_Half;
4406                                 adv |= ADVERTISED_1000baseT_Full;
4407                         }
4408
4409                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4410                 } else {
4411                         adv = tp->link_config.advertising;
4412                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4413                                 adv &= ~(ADVERTISED_1000baseT_Half |
4414                                          ADVERTISED_1000baseT_Full);
4415
4416                         fc = tp->link_config.flowctrl;
4417                 }
4418
4419                 tg3_phy_autoneg_cfg(tp, adv, fc);
4420
4421                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4422                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4423                         /* Normally during power down we want to autonegotiate
4424                          * the lowest possible speed for WOL. However, to avoid
4425                          * link flap we leave the PHY settings untouched.
4426                          */
4427                         return;
4428                 }
4429
4430                 tg3_writephy(tp, MII_BMCR,
4431                              BMCR_ANENABLE | BMCR_ANRESTART);
4432         } else {
4433                 int i;
4434                 u32 bmcr, orig_bmcr;
4435
4436                 tp->link_config.active_speed = tp->link_config.speed;
4437                 tp->link_config.active_duplex = tp->link_config.duplex;
4438
4439                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4440                         /* With autoneg disabled, the 5715 (ASIC rev 5714)
4441                          * only links up when the advertisement register has
4442                          * the configured speed enabled.
4443                          */
4444                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4445                 }
4446
4447                 bmcr = 0;
4448                 switch (tp->link_config.speed) {
4449                 default:
4450                 case SPEED_10:
4451                         break;
4452
4453                 case SPEED_100:
4454                         bmcr |= BMCR_SPEED100;
4455                         break;
4456
4457                 case SPEED_1000:
4458                         bmcr |= BMCR_SPEED1000;
4459                         break;
4460                 }
4461
4462                 if (tp->link_config.duplex == DUPLEX_FULL)
4463                         bmcr |= BMCR_FULLDPLX;
4464
4465                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4466                     (bmcr != orig_bmcr)) {
4467                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4468                         for (i = 0; i < 1500; i++) {
4469                                 u32 tmp;
4470
4471                                 udelay(10);
4472                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4473                                     tg3_readphy(tp, MII_BMSR, &tmp))
4474                                         continue;
4475                                 if (!(tmp & BMSR_LSTATUS)) {
4476                                         udelay(40);
4477                                         break;
4478                                 }
4479                         }
4480                         tg3_writephy(tp, MII_BMCR, bmcr);
4481                         udelay(40);
4482                 }
4483         }
4484 }
4485
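/* Read the PHY's current BMCR/advertisement registers back into
 * tp->link_config (autoneg, speed, duplex, advertised modes and flow
 * control), so the driver's cached link configuration matches whatever
 * the PHY was left programmed with.
 */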
4486 static int tg3_phy_pull_config(struct tg3 *tp)
4487 {
4488         int err;
4489         u32 val;
4490
4491         err = tg3_readphy(tp, MII_BMCR, &val);
4492         if (err)
4493                 goto done;
4494
4495         if (!(val & BMCR_ANENABLE)) {
4496                 tp->link_config.autoneg = AUTONEG_DISABLE;
4497                 tp->link_config.advertising = 0;
4498                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4499
4500                 err = -EIO;
4501
4502                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4503                 case 0:
4504                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4505                                 goto done;
4506
4507                         tp->link_config.speed = SPEED_10;
4508                         break;
4509                 case BMCR_SPEED100:
4510                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511                                 goto done;
4512
4513                         tp->link_config.speed = SPEED_100;
4514                         break;
4515                 case BMCR_SPEED1000:
4516                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4517                                 tp->link_config.speed = SPEED_1000;
4518                                 break;
4519                         }
4520                         /* Fall through */
4521                 default:
4522                         goto done;
4523                 }
4524
4525                 if (val & BMCR_FULLDPLX)
4526                         tp->link_config.duplex = DUPLEX_FULL;
4527                 else
4528                         tp->link_config.duplex = DUPLEX_HALF;
4529
4530                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4531
4532                 err = 0;
4533                 goto done;
4534         }
4535
4536         tp->link_config.autoneg = AUTONEG_ENABLE;
4537         tp->link_config.advertising = ADVERTISED_Autoneg;
4538         tg3_flag_set(tp, PAUSE_AUTONEG);
4539
4540         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4541                 u32 adv;
4542
4543                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4544                 if (err)
4545                         goto done;
4546
4547                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4548                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4549
4550                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4551         } else {
4552                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4553         }
4554
4555         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4556                 u32 adv;
4557
4558                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4560                         if (err)
4561                                 goto done;
4562
4563                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4564                 } else {
4565                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4566                         if (err)
4567                                 goto done;
4568
4569                         adv = tg3_decode_flowctrl_1000X(val);
4570                         tp->link_config.flowctrl = adv;
4571
4572                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4573                         adv = mii_adv_to_ethtool_adv_x(val);
4574                 }
4575
4576                 tp->link_config.advertising |= adv;
4577         }
4578
4579 done:
4580         return err;
4581 }
4582
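/* Apply BCM5401-specific DSP fixups through the PHY's shadow DSP
 * address/data registers.
 */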
4583 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4584 {
4585         int err;
4586
4587         /* Turn off tap power management and set the
4588          * extended packet length bit. */
4589         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4590
4591         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4592         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4593         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4594         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4595         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4596
4597         udelay(40);
4598
4599         return err;
4600 }
4601
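/* Return true if the EEE advertisement and LPI settings read back from
 * the hardware match the state cached in tp->eee.  Trivially true for
 * PHYs without EEE capability.
 */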
4602 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4603 {
4604         struct ethtool_eee eee;
4605
4606         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4607                 return true;
4608
4609         tg3_eee_pull_config(tp, &eee);
4610
4611         if (tp->eee.eee_enabled) {
4612                 if (tp->eee.advertised != eee.advertised ||
4613                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4614                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4615                         return false;
4616         } else {
4617                 /* EEE is disabled but we're advertising */
4618                 if (eee.advertised)
4619                         return false;
4620         }
4621
4622         return true;
4623 }
4624
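/* Verify that MII_ADVERTISE and MII_CTRL1000 still contain exactly the
 * advertisement we programmed, including pause bits and the 5701 A0/B0
 * master/slave workaround.  Returns false on any mismatch or PHY read
 * failure; *lcladv is filled with the local advertisement word.
 */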
4625 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4626 {
4627         u32 advmsk, tgtadv, advertising;
4628
4629         advertising = tp->link_config.advertising;
4630         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4631
4632         advmsk = ADVERTISE_ALL;
4633         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4634                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4635                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4636         }
4637
4638         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4639                 return false;
4640
4641         if ((*lcladv & advmsk) != tgtadv)
4642                 return false;
4643
4644         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4645                 u32 tg3_ctrl;
4646
4647                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4648
4649                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4650                         return false;
4651
4652                 if (tgtadv &&
4653                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4654                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4655                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4656                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4657                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4658                 } else {
4659                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4660                 }
4661
4662                 if (tg3_ctrl != tgtadv)
4663                         return false;
4664         }
4665
4666         return true;
4667 }
4668
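/* Fetch the link partner's abilities from MII_STAT1000 and MII_LPA,
 * cache them in ethtool form in tp->link_config.rmt_adv, and return the
 * raw LPA word through @rmtadv.  Returns false on a PHY read failure.
 */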
4669 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4670 {
4671         u32 lpeth = 0;
4672
4673         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4674                 u32 val;
4675
4676                 if (tg3_readphy(tp, MII_STAT1000, &val))
4677                         return false;
4678
4679                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4680         }
4681
4682         if (tg3_readphy(tp, MII_LPA, rmtadv))
4683                 return false;
4684
4685         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4686         tp->link_config.rmt_adv = lpeth;
4687
4688         return true;
4689 }
4690
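/* Propagate a link state transition to the net core (carrier on/off)
 * and log it via tg3_link_report().  Returns true if the link state
 * actually changed.
 */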
4691 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4692 {
4693         if (curr_link_up != tp->link_up) {
4694                 if (curr_link_up) {
4695                         netif_carrier_on(tp->dev);
4696                 } else {
4697                         netif_carrier_off(tp->dev);
4698                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4699                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4700                 }
4701
4702                 tg3_link_report(tp);
4703                 return true;
4704         }
4705
4706         return false;
4707 }
4708
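/* Ack and clear any latched MAC status and event bits before probing
 * the link state.
 */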
4709 static void tg3_clear_mac_status(struct tg3 *tp)
4710 {
4711         tw32(MAC_EVENT, 0);
4712
4713         tw32_f(MAC_STATUS,
4714                MAC_STATUS_SYNC_CHANGED |
4715                MAC_STATUS_CFG_CHANGED |
4716                MAC_STATUS_MI_COMPLETION |
4717                MAC_STATUS_LNKSTATE_CHANGED);
4718         udelay(40);
4719 }
4720
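/* Program the CPMU EEE block (link-idle control, LPI exit timer, mode
 * and debounce timers) from the settings cached in tp->eee.  The mode
 * register is written as zero when EEE is disabled.
 */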
4721 static void tg3_setup_eee(struct tg3 *tp)
4722 {
4723         u32 val;
4724
4725         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4726               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4727         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4728                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4729
4730         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4731
4732         tw32_f(TG3_CPMU_EEE_CTRL,
4733                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4734
4735         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4736               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4737               TG3_CPMU_EEEMD_LPI_IN_RX |
4738               TG3_CPMU_EEEMD_EEE_ENABLE;
4739
4740         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4741                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4742
4743         if (tg3_flag(tp, ENABLE_APE))
4744                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4745
4746         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4747
4748         tw32_f(TG3_CPMU_EEE_DBTMR1,
4749                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4750                (tp->eee.tx_lpi_timer & 0xffff));
4751
4752         tw32_f(TG3_CPMU_EEE_DBTMR2,
4753                TG3_CPMU_DBTMR2_APE_TX_2047US |
4754                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4755 }
4756
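/* Bring up or re-validate the link on a copper PHY: apply chip-specific
 * workarounds, poll BMSR for link, check that the autoneg result (and
 * EEE state) still matches our configuration, resolve flow control, and
 * program MAC_MODE for the negotiated speed and duplex.  Restarts link
 * setup via tg3_phy_copper_begin() when no usable link is found.
 */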
4757 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4758 {
4759         bool current_link_up;
4760         u32 bmsr, val;
4761         u32 lcl_adv, rmt_adv;
4762         u16 current_speed;
4763         u8 current_duplex;
4764         int i, err;
4765
4766         tg3_clear_mac_status(tp);
4767
4768         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4769                 tw32_f(MAC_MI_MODE,
4770                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4771                 udelay(80);
4772         }
4773
4774         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4775
4776         /* Some third-party PHYs need to be reset on link going
4777          * down.
4778          */
4779         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4780              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4781              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4782             tp->link_up) {
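                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */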
4783                 tg3_readphy(tp, MII_BMSR, &bmsr);
4784                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4785                     !(bmsr & BMSR_LSTATUS))
4786                         force_reset = true;
4787         }
4788         if (force_reset)
4789                 tg3_phy_reset(tp);
4790
4791         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4792                 tg3_readphy(tp, MII_BMSR, &bmsr);
4793                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4794                     !tg3_flag(tp, INIT_COMPLETE))
4795                         bmsr = 0;
4796
4797                 if (!(bmsr & BMSR_LSTATUS)) {
4798                         err = tg3_init_5401phy_dsp(tp);
4799                         if (err)
4800                                 return err;
4801
4802                         tg3_readphy(tp, MII_BMSR, &bmsr);
4803                         for (i = 0; i < 1000; i++) {
4804                                 udelay(10);
4805                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4806                                     (bmsr & BMSR_LSTATUS)) {
4807                                         udelay(40);
4808                                         break;
4809                                 }
4810                         }
4811
4812                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4813                             TG3_PHY_REV_BCM5401_B0 &&
4814                             !(bmsr & BMSR_LSTATUS) &&
4815                             tp->link_config.active_speed == SPEED_1000) {
4816                                 err = tg3_phy_reset(tp);
4817                                 if (!err)
4818                                         err = tg3_init_5401phy_dsp(tp);
4819                                 if (err)
4820                                         return err;
4821                         }
4822                 }
4823         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4824                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4825                 /* 5701 {A0,B0} CRC bug workaround */
4826                 tg3_writephy(tp, 0x15, 0x0a75);
4827                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4828                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4829                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4830         }
4831
4832         /* Clear pending interrupts... */
4833         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4834         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4835
4836         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4837                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4838         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4839                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4840
4841         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4842             tg3_asic_rev(tp) == ASIC_REV_5701) {
4843                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4844                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4845                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4846                 else
4847                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4848         }
4849
4850         current_link_up = false;
4851         current_speed = SPEED_UNKNOWN;
4852         current_duplex = DUPLEX_UNKNOWN;
4853         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4854         tp->link_config.rmt_adv = 0;
4855
4856         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4857                 err = tg3_phy_auxctl_read(tp,
4858                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4859                                           &val);
4860                 if (!err && !(val & (1 << 10))) {
4861                         tg3_phy_auxctl_write(tp,
4862                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4863                                              val | (1 << 10));
4864                         goto relink;
4865                 }
4866         }
4867
4868         bmsr = 0;
4869         for (i = 0; i < 100; i++) {
4870                 tg3_readphy(tp, MII_BMSR, &bmsr);
4871                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4872                     (bmsr & BMSR_LSTATUS))
4873                         break;
4874                 udelay(40);
4875         }
4876
4877         if (bmsr & BMSR_LSTATUS) {
4878                 u32 aux_stat, bmcr;
4879
4880                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4881                 for (i = 0; i < 2000; i++) {
4882                         udelay(10);
4883                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4884                             aux_stat)
4885                                 break;
4886                 }
4887
4888                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4889                                              &current_speed,
4890                                              &current_duplex);
4891
4892                 bmcr = 0;
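                /* Skip transient garbage readings (0 or 0x7fff) while
                 * the PHY settles after autonegotiation.
                 */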
4893                 for (i = 0; i < 200; i++) {
4894                         tg3_readphy(tp, MII_BMCR, &bmcr);
4895                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4896                                 continue;
4897                         if (bmcr && bmcr != 0x7fff)
4898                                 break;
4899                         udelay(10);
4900                 }
4901
4902                 lcl_adv = 0;
4903                 rmt_adv = 0;
4904
4905                 tp->link_config.active_speed = current_speed;
4906                 tp->link_config.active_duplex = current_duplex;
4907
4908                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4909                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4910
4911                         if ((bmcr & BMCR_ANENABLE) &&
4912                             eee_config_ok &&
4913                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4914                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4915                                 current_link_up = true;
4916
4917                         /* Changes to EEE settings take effect only after a
4918                          * PHY reset.  If we have skipped a reset due to Link
4919                          * Flap Avoidance being enabled, do it now.
4920                          */
4921                         if (!eee_config_ok &&
4922                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4923                             !force_reset) {
4924                                 tg3_setup_eee(tp);
4925                                 tg3_phy_reset(tp);
4926                         }
4927                 } else {
4928                         if (!(bmcr & BMCR_ANENABLE) &&
4929                             tp->link_config.speed == current_speed &&
4930                             tp->link_config.duplex == current_duplex) {
4931                                 current_link_up = true;
4932                         }
4933                 }
4934
4935                 if (current_link_up &&
4936                     tp->link_config.active_duplex == DUPLEX_FULL) {
4937                         u32 reg, bit;
4938
4939                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4940                                 reg = MII_TG3_FET_GEN_STAT;
4941                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4942                         } else {
4943                                 reg = MII_TG3_EXT_STAT;
4944                                 bit = MII_TG3_EXT_STAT_MDIX;
4945                         }
4946
4947                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4948                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4949
4950                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4951                 }
4952         }
4953
4954 relink:
4955         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4956                 tg3_phy_copper_begin(tp);
4957
4958                 if (tg3_flag(tp, ROBOSWITCH)) {
4959                         current_link_up = true;
4960                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4961                         current_speed = SPEED_1000;
4962                         current_duplex = DUPLEX_FULL;
4963                         tp->link_config.active_speed = current_speed;
4964                         tp->link_config.active_duplex = current_duplex;
4965                 }
4966
4967                 tg3_readphy(tp, MII_BMSR, &bmsr);
4968                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4969                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4970                         current_link_up = true;
4971         }
4972
4973         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4974         if (current_link_up) {
4975                 if (tp->link_config.active_speed == SPEED_100 ||
4976                     tp->link_config.active_speed == SPEED_10)
4977                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4978                 else
4979                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4980         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4981                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4982         else
4983                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984
4985         /* In order for the 5750 core in BCM4785 chip to work properly
4986          * in RGMII mode, the Led Control Register must be set up.
4987          */
4988         if (tg3_flag(tp, RGMII_MODE)) {
4989                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4990                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4991
4992                 if (tp->link_config.active_speed == SPEED_10)
4993                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4994                 else if (tp->link_config.active_speed == SPEED_100)
4995                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4996                                      LED_CTRL_100MBPS_ON);
4997                 else if (tp->link_config.active_speed == SPEED_1000)
4998                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4999                                      LED_CTRL_1000MBPS_ON);
5000
5001                 tw32(MAC_LED_CTRL, led_ctrl);
5002                 udelay(40);
5003         }
5004
5005         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5006         if (tp->link_config.active_duplex == DUPLEX_HALF)
5007                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5008
5009         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5010                 if (current_link_up &&
5011                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5012                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5013                 else
5014                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5015         }
5016
5017         /* ??? Without this setting Netgear GA302T PHY does not
5018          * ??? send/receive packets...
5019          */
5020         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5021             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5022                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5023                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5024                 udelay(80);
5025         }
5026
5027         tw32_f(MAC_MODE, tp->mac_mode);
5028         udelay(40);
5029
5030         tg3_phy_eee_adjust(tp, current_link_up);
5031
5032         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5033                 /* Polled via timer. */
5034                 tw32_f(MAC_EVENT, 0);
5035         } else {
5036                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5037         }
5038         udelay(40);
5039
5040         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5041             current_link_up &&
5042             tp->link_config.active_speed == SPEED_1000 &&
5043             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5044                 udelay(120);
5045                 tw32_f(MAC_STATUS,
5046                      (MAC_STATUS_SYNC_CHANGED |
5047                       MAC_STATUS_CFG_CHANGED));
5048                 udelay(40);
5049                 tg3_write_mem(tp,
5050                               NIC_SRAM_FIRMWARE_MBOX,
5051                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5052         }
5053
5054         /* Prevent send BD corruption by keeping CLKREQ disabled at 10/100 speeds. */
5055         if (tg3_flag(tp, CLKREQ_BUG)) {
5056                 if (tp->link_config.active_speed == SPEED_100 ||
5057                     tp->link_config.active_speed == SPEED_10)
5058                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5059                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5060                 else
5061                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5062                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5063         }
5064
5065         tg3_test_and_report_link_chg(tp, current_link_up);
5066
5067         return 0;
5068 }
5069
5070 struct tg3_fiber_aneginfo {
5071         int state;
5072 #define ANEG_STATE_UNKNOWN              0
5073 #define ANEG_STATE_AN_ENABLE            1
5074 #define ANEG_STATE_RESTART_INIT         2
5075 #define ANEG_STATE_RESTART              3
5076 #define ANEG_STATE_DISABLE_LINK_OK      4
5077 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5078 #define ANEG_STATE_ABILITY_DETECT       6
5079 #define ANEG_STATE_ACK_DETECT_INIT      7
5080 #define ANEG_STATE_ACK_DETECT           8
5081 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5082 #define ANEG_STATE_COMPLETE_ACK         10
5083 #define ANEG_STATE_IDLE_DETECT_INIT     11
5084 #define ANEG_STATE_IDLE_DETECT          12
5085 #define ANEG_STATE_LINK_OK              13
5086 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5087 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5088
5089         u32 flags;
5090 #define MR_AN_ENABLE            0x00000001
5091 #define MR_RESTART_AN           0x00000002
5092 #define MR_AN_COMPLETE          0x00000004
5093 #define MR_PAGE_RX              0x00000008
5094 #define MR_NP_LOADED            0x00000010
5095 #define MR_TOGGLE_TX            0x00000020
5096 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5097 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5098 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5099 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5100 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5101 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5102 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5103 #define MR_TOGGLE_RX            0x00002000
5104 #define MR_NP_RX                0x00004000
5105
5106 #define MR_LINK_OK              0x80000000
5107
5108         unsigned long link_time, cur_time;
5109
5110         u32 ability_match_cfg;
5111         int ability_match_count;
5112
5113         char ability_match, idle_match, ack_match;
5114
5115         u32 txconfig, rxconfig;
5116 #define ANEG_CFG_NP             0x00000080
5117 #define ANEG_CFG_ACK            0x00000040
5118 #define ANEG_CFG_RF2            0x00000020
5119 #define ANEG_CFG_RF1            0x00000010
5120 #define ANEG_CFG_PS2            0x00000001
5121 #define ANEG_CFG_PS1            0x00008000
5122 #define ANEG_CFG_HD             0x00004000
5123 #define ANEG_CFG_FD             0x00002000
5124 #define ANEG_CFG_INVAL          0x00001f06
5125
5126 };
5127 #define ANEG_OK         0
5128 #define ANEG_DONE       1
5129 #define ANEG_TIMER_ENAB 2
5130 #define ANEG_FAILED     -1
5131
5132 #define ANEG_STATE_SETTLE_TIME  10000
5133
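/* One tick of the software 1000BASE-X autonegotiation state machine
 * (clause 37 style), driven from fiber_autoneg() below.  Consumes the
 * received config word in MAC_RX_AUTO_NEG and returns ANEG_OK,
 * ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */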
5134 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5135                                    struct tg3_fiber_aneginfo *ap)
5136 {
5137         u16 flowctrl;
5138         unsigned long delta;
5139         u32 rx_cfg_reg;
5140         int ret;
5141
5142         if (ap->state == ANEG_STATE_UNKNOWN) {
5143                 ap->rxconfig = 0;
5144                 ap->link_time = 0;
5145                 ap->cur_time = 0;
5146                 ap->ability_match_cfg = 0;
5147                 ap->ability_match_count = 0;
5148                 ap->ability_match = 0;
5149                 ap->idle_match = 0;
5150                 ap->ack_match = 0;
5151         }
5152         ap->cur_time++;
5153
5154         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5155                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5156
5157                 if (rx_cfg_reg != ap->ability_match_cfg) {
5158                         ap->ability_match_cfg = rx_cfg_reg;
5159                         ap->ability_match = 0;
5160                         ap->ability_match_count = 0;
5161                 } else {
5162                         if (++ap->ability_match_count > 1) {
5163                                 ap->ability_match = 1;
5164                                 ap->ability_match_cfg = rx_cfg_reg;
5165                         }
5166                 }
5167                 if (rx_cfg_reg & ANEG_CFG_ACK)
5168                         ap->ack_match = 1;
5169                 else
5170                         ap->ack_match = 0;
5171
5172                 ap->idle_match = 0;
5173         } else {
5174                 ap->idle_match = 1;
5175                 ap->ability_match_cfg = 0;
5176                 ap->ability_match_count = 0;
5177                 ap->ability_match = 0;
5178                 ap->ack_match = 0;
5179
5180                 rx_cfg_reg = 0;
5181         }
5182
5183         ap->rxconfig = rx_cfg_reg;
5184         ret = ANEG_OK;
5185
5186         switch (ap->state) {
5187         case ANEG_STATE_UNKNOWN:
5188                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5189                         ap->state = ANEG_STATE_AN_ENABLE;
5190
5191                 /* fallthru */
5192         case ANEG_STATE_AN_ENABLE:
5193                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5194                 if (ap->flags & MR_AN_ENABLE) {
5195                         ap->link_time = 0;
5196                         ap->cur_time = 0;
5197                         ap->ability_match_cfg = 0;
5198                         ap->ability_match_count = 0;
5199                         ap->ability_match = 0;
5200                         ap->idle_match = 0;
5201                         ap->ack_match = 0;
5202
5203                         ap->state = ANEG_STATE_RESTART_INIT;
5204                 } else {
5205                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5206                 }
5207                 break;
5208
5209         case ANEG_STATE_RESTART_INIT:
5210                 ap->link_time = ap->cur_time;
5211                 ap->flags &= ~(MR_NP_LOADED);
5212                 ap->txconfig = 0;
5213                 tw32(MAC_TX_AUTO_NEG, 0);
5214                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5215                 tw32_f(MAC_MODE, tp->mac_mode);
5216                 udelay(40);
5217
5218                 ret = ANEG_TIMER_ENAB;
5219                 ap->state = ANEG_STATE_RESTART;
5220
5221                 /* fallthru */
5222         case ANEG_STATE_RESTART:
5223                 delta = ap->cur_time - ap->link_time;
5224                 if (delta > ANEG_STATE_SETTLE_TIME)
5225                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5226                 else
5227                         ret = ANEG_TIMER_ENAB;
5228                 break;
5229
5230         case ANEG_STATE_DISABLE_LINK_OK:
5231                 ret = ANEG_DONE;
5232                 break;
5233
5234         case ANEG_STATE_ABILITY_DETECT_INIT:
5235                 ap->flags &= ~(MR_TOGGLE_TX);
5236                 ap->txconfig = ANEG_CFG_FD;
5237                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5238                 if (flowctrl & ADVERTISE_1000XPAUSE)
5239                         ap->txconfig |= ANEG_CFG_PS1;
5240                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5241                         ap->txconfig |= ANEG_CFG_PS2;
5242                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5243                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5244                 tw32_f(MAC_MODE, tp->mac_mode);
5245                 udelay(40);
5246
5247                 ap->state = ANEG_STATE_ABILITY_DETECT;
5248                 break;
5249
5250         case ANEG_STATE_ABILITY_DETECT:
5251                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5252                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5253                 break;
5254
5255         case ANEG_STATE_ACK_DETECT_INIT:
5256                 ap->txconfig |= ANEG_CFG_ACK;
5257                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5258                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5259                 tw32_f(MAC_MODE, tp->mac_mode);
5260                 udelay(40);
5261
5262                 ap->state = ANEG_STATE_ACK_DETECT;
5263
5264                 /* fallthru */
5265         case ANEG_STATE_ACK_DETECT:
5266                 if (ap->ack_match != 0) {
5267                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5268                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5269                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5270                         } else {
5271                                 ap->state = ANEG_STATE_AN_ENABLE;
5272                         }
5273                 } else if (ap->ability_match != 0 &&
5274                            ap->rxconfig == 0) {
5275                         ap->state = ANEG_STATE_AN_ENABLE;
5276                 }
5277                 break;
5278
5279         case ANEG_STATE_COMPLETE_ACK_INIT:
5280                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5281                         ret = ANEG_FAILED;
5282                         break;
5283                 }
5284                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5285                                MR_LP_ADV_HALF_DUPLEX |
5286                                MR_LP_ADV_SYM_PAUSE |
5287                                MR_LP_ADV_ASYM_PAUSE |
5288                                MR_LP_ADV_REMOTE_FAULT1 |
5289                                MR_LP_ADV_REMOTE_FAULT2 |
5290                                MR_LP_ADV_NEXT_PAGE |
5291                                MR_TOGGLE_RX |
5292                                MR_NP_RX);
5293                 if (ap->rxconfig & ANEG_CFG_FD)
5294                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5295                 if (ap->rxconfig & ANEG_CFG_HD)
5296                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5297                 if (ap->rxconfig & ANEG_CFG_PS1)
5298                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5299                 if (ap->rxconfig & ANEG_CFG_PS2)
5300                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5301                 if (ap->rxconfig & ANEG_CFG_RF1)
5302                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5303                 if (ap->rxconfig & ANEG_CFG_RF2)
5304                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5305                 if (ap->rxconfig & ANEG_CFG_NP)
5306                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5307
5308                 ap->link_time = ap->cur_time;
5309
5310                 ap->flags ^= (MR_TOGGLE_TX);
5311                 if (ap->rxconfig & 0x0008)
5312                         ap->flags |= MR_TOGGLE_RX;
5313                 if (ap->rxconfig & ANEG_CFG_NP)
5314                         ap->flags |= MR_NP_RX;
5315                 ap->flags |= MR_PAGE_RX;
5316
5317                 ap->state = ANEG_STATE_COMPLETE_ACK;
5318                 ret = ANEG_TIMER_ENAB;
5319                 break;
5320
5321         case ANEG_STATE_COMPLETE_ACK:
5322                 if (ap->ability_match != 0 &&
5323                     ap->rxconfig == 0) {
5324                         ap->state = ANEG_STATE_AN_ENABLE;
5325                         break;
5326                 }
5327                 delta = ap->cur_time - ap->link_time;
5328                 if (delta > ANEG_STATE_SETTLE_TIME) {
5329                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5330                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5331                         } else {
5332                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5333                                     !(ap->flags & MR_NP_RX)) {
5334                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5335                                 } else {
5336                                         ret = ANEG_FAILED;
5337                                 }
5338                         }
5339                 }
5340                 break;
5341
5342         case ANEG_STATE_IDLE_DETECT_INIT:
5343                 ap->link_time = ap->cur_time;
5344                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5345                 tw32_f(MAC_MODE, tp->mac_mode);
5346                 udelay(40);
5347
5348                 ap->state = ANEG_STATE_IDLE_DETECT;
5349                 ret = ANEG_TIMER_ENAB;
5350                 break;
5351
5352         case ANEG_STATE_IDLE_DETECT:
5353                 if (ap->ability_match != 0 &&
5354                     ap->rxconfig == 0) {
5355                         ap->state = ANEG_STATE_AN_ENABLE;
5356                         break;
5357                 }
5358                 delta = ap->cur_time - ap->link_time;
5359                 if (delta > ANEG_STATE_SETTLE_TIME) {
5360                         /* XXX another gem from the Broadcom driver :( */
5361                         ap->state = ANEG_STATE_LINK_OK;
5362                 }
5363                 break;
5364
5365         case ANEG_STATE_LINK_OK:
5366                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5367                 ret = ANEG_DONE;
5368                 break;
5369
5370         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5371                 /* ??? unimplemented */
5372                 break;
5373
5374         case ANEG_STATE_NEXT_PAGE_WAIT:
5375                 /* ??? unimplemented */
5376                 break;
5377
5378         default:
5379                 ret = ANEG_FAILED;
5380                 break;
5381         }
5382
5383         return ret;
5384 }
5385
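/* Run the software fiber autoneg state machine to completion, bounded
 * at roughly 195 ms of 1 us ticks.  Returns 1 on success and reports
 * the tx config word and the MR_* result flags through @txflags and
 * @rxflags.
 */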
5386 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5387 {
5388         int res = 0;
5389         struct tg3_fiber_aneginfo aninfo;
5390         int status = ANEG_FAILED;
5391         unsigned int tick;
5392         u32 tmp;
5393
5394         tw32_f(MAC_TX_AUTO_NEG, 0);
5395
5396         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5397         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5398         udelay(40);
5399
5400         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5401         udelay(40);
5402
5403         memset(&aninfo, 0, sizeof(aninfo));
5404         aninfo.flags |= MR_AN_ENABLE;
5405         aninfo.state = ANEG_STATE_UNKNOWN;
5406         aninfo.cur_time = 0;
5407         tick = 0;
5408         while (++tick < 195000) {
5409                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5410                 if (status == ANEG_DONE || status == ANEG_FAILED)
5411                         break;
5412
5413                 udelay(1);
5414         }
5415
5416         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5417         tw32_f(MAC_MODE, tp->mac_mode);
5418         udelay(40);
5419
5420         *txflags = aninfo.txconfig;
5421         *rxflags = aninfo.flags;
5422
5423         if (status == ANEG_DONE &&
5424             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5425                              MR_LP_ADV_FULL_DUPLEX)))
5426                 res = 1;
5427
5428         return res;
5429 }
5430
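/* Hand-tuned init sequence for the BCM8002 SerDes PHY: software reset,
 * PLL lock range, comdet/auto-lock setup and a POR pulse via raw vendor
 * registers.  Skipped when init is already complete and there is no
 * PCS sync.
 */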
5431 static void tg3_init_bcm8002(struct tg3 *tp)
5432 {
5433         u32 mac_status = tr32(MAC_STATUS);
5434         int i;
5435
5436         /* Reset when initializing for the first time or when we have a link. */
5437         if (tg3_flag(tp, INIT_COMPLETE) &&
5438             !(mac_status & MAC_STATUS_PCS_SYNCED))
5439                 return;
5440
5441         /* Set PLL lock range. */
5442         tg3_writephy(tp, 0x16, 0x8007);
5443
5444         /* SW reset */
5445         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5446
5447         /* Wait for reset to complete. */
5448         /* XXX schedule_timeout() ... */
5449         for (i = 0; i < 500; i++)
5450                 udelay(10);
5451
5452         /* Config mode; select PMA/Ch 1 regs. */
5453         tg3_writephy(tp, 0x10, 0x8411);
5454
5455         /* Enable auto-lock and comdet, select txclk for tx. */
5456         tg3_writephy(tp, 0x11, 0x0a10);
5457
5458         tg3_writephy(tp, 0x18, 0x00a0);
5459         tg3_writephy(tp, 0x16, 0x41ff);
5460
5461         /* Assert and deassert POR. */
5462         tg3_writephy(tp, 0x13, 0x0400);
5463         udelay(40);
5464         tg3_writephy(tp, 0x13, 0x0000);
5465
5466         tg3_writephy(tp, 0x11, 0x0a50);
5467         udelay(40);
5468         tg3_writephy(tp, 0x11, 0x0a10);
5469
5470         /* Wait for signal to stabilize */
5471         /* XXX schedule_timeout() ... */
5472         for (i = 0; i < 15000; i++)
5473                 udelay(10);
5474
5475         /* Deselect the channel register so we can read the PHYID
5476          * later.
5477          */
5478         tg3_writephy(tp, 0x10, 0x8011);
5479 }
5480
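/* Drive the SG_DIG hardware autonegotiation block for fiber links:
 * program the expected control word (including pause bits), apply the
 * MAC_SERDES_CFG workaround on chips that need it, and fall back to
 * parallel detection when the partner sends no config words.  Returns
 * whether the link came up.
 */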
5481 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5482 {
5483         u16 flowctrl;
5484         bool current_link_up;
5485         u32 sg_dig_ctrl, sg_dig_status;
5486         u32 serdes_cfg, expected_sg_dig_ctrl;
5487         int workaround, port_a;
5488
5489         serdes_cfg = 0;
5490         expected_sg_dig_ctrl = 0;
5491         workaround = 0;
5492         port_a = 1;
5493         current_link_up = false;
5494
5495         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5496             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5497                 workaround = 1;
5498                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5499                         port_a = 0;
5500
5501                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5502                 /* preserve bits 20-23 for voltage regulator */
5503                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5504         }
5505
5506         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5507
5508         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5509                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5510                         if (workaround) {
5511                                 u32 val = serdes_cfg;
5512
5513                                 if (port_a)
5514                                         val |= 0xc010000;
5515                                 else
5516                                         val |= 0x4010000;
5517                                 tw32_f(MAC_SERDES_CFG, val);
5518                         }
5519
5520                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5521                 }
5522                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5523                         tg3_setup_flow_control(tp, 0, 0);
5524                         current_link_up = true;
5525                 }
5526                 goto out;
5527         }
5528
5529         /* Want auto-negotiation.  */
5530         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5531
5532         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5533         if (flowctrl & ADVERTISE_1000XPAUSE)
5534                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5535         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5536                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5537
5538         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5539                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5540                     tp->serdes_counter &&
5541                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5542                                     MAC_STATUS_RCVD_CFG)) ==
5543                      MAC_STATUS_PCS_SYNCED)) {
5544                         tp->serdes_counter--;
5545                         current_link_up = true;
5546                         goto out;
5547                 }
5548 restart_autoneg:
5549                 if (workaround)
5550                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5551                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5552                 udelay(5);
5553                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5554
5555                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5556                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5557         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5558                                  MAC_STATUS_SIGNAL_DET)) {
5559                 sg_dig_status = tr32(SG_DIG_STATUS);
5560                 mac_status = tr32(MAC_STATUS);
5561
5562                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5563                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5564                         u32 local_adv = 0, remote_adv = 0;
5565
5566                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5567                                 local_adv |= ADVERTISE_1000XPAUSE;
5568                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5569                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5570
5571                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5572                                 remote_adv |= LPA_1000XPAUSE;
5573                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5574                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5575
5576                         tp->link_config.rmt_adv =
5577                                            mii_adv_to_ethtool_adv_x(remote_adv);
5578
5579                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5580                         current_link_up = true;
5581                         tp->serdes_counter = 0;
5582                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5583                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5584                         if (tp->serdes_counter)
5585                                 tp->serdes_counter--;
5586                         else {
5587                                 if (workaround) {
5588                                         u32 val = serdes_cfg;
5589
5590                                         if (port_a)
5591                                                 val |= 0xc010000;
5592                                         else
5593                                                 val |= 0x4010000;
5594
5595                                         tw32_f(MAC_SERDES_CFG, val);
5596                                 }
5597
5598                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5599                                 udelay(40);
5600
5601                                 /* Link parallel detection - link is up only
5602                                  * if we have PCS_SYNC and are not receiving
5603                                  * config code words. */
5604                                 mac_status = tr32(MAC_STATUS);
5605                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5606                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5607                                         tg3_setup_flow_control(tp, 0, 0);
5608                                         current_link_up = true;
5609                                         tp->phy_flags |=
5610                                                 TG3_PHYFLG_PARALLEL_DETECT;
5611                                         tp->serdes_counter =
5612                                                 SERDES_PARALLEL_DET_TIMEOUT;
5613                                 } else
5614                                         goto restart_autoneg;
5615                         }
5616                 }
5617         } else {
5618                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5619                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5620         }
5621
5622 out:
5623         return current_link_up;
5624 }
5625
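/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine when autoneg is enabled, otherwise force a 1000 Mbps
 * full-duplex link.  PCS sync is required before anything else.
 */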
5626 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5627 {
5628         bool current_link_up = false;
5629
5630         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5631                 goto out;
5632
5633         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5634                 u32 txflags, rxflags;
5635                 int i;
5636
5637                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5638                         u32 local_adv = 0, remote_adv = 0;
5639
5640                         if (txflags & ANEG_CFG_PS1)
5641                                 local_adv |= ADVERTISE_1000XPAUSE;
5642                         if (txflags & ANEG_CFG_PS2)
5643                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5644
5645                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5646                                 remote_adv |= LPA_1000XPAUSE;
5647                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5648                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5649
5650                         tp->link_config.rmt_adv =
5651                                            mii_adv_to_ethtool_adv_x(remote_adv);
5652
5653                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5654
5655                         current_link_up = true;
5656                 }
5657                 for (i = 0; i < 30; i++) {
5658                         udelay(20);
5659                         tw32_f(MAC_STATUS,
5660                                (MAC_STATUS_SYNC_CHANGED |
5661                                 MAC_STATUS_CFG_CHANGED));
5662                         udelay(40);
5663                         if ((tr32(MAC_STATUS) &
5664                              (MAC_STATUS_SYNC_CHANGED |
5665                               MAC_STATUS_CFG_CHANGED)) == 0)
5666                                 break;
5667                 }
5668
5669                 mac_status = tr32(MAC_STATUS);
5670                 if (!current_link_up &&
5671                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5672                     !(mac_status & MAC_STATUS_RCVD_CFG))
5673                         current_link_up = true;
5674         } else {
5675                 tg3_setup_flow_control(tp, 0, 0);
5676
5677                 /* Forcing 1000FD link up. */
5678                 current_link_up = true;
5679
5680                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5681                 udelay(40);
5682
5683                 tw32_f(MAC_MODE, tp->mac_mode);
5684                 udelay(40);
5685         }
5686
5687 out:
5688         return current_link_up;
5689 }
5690
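/* Top-level link setup for TBI/fiber ports: short-circuit when the
 * link is already stable, run hardware (SG_DIG) or software autoneg as
 * appropriate, then update the LED overrides and report any
 * speed/duplex/flow-control change.
 */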
5691 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5692 {
5693         u32 orig_pause_cfg;
5694         u16 orig_active_speed;
5695         u8 orig_active_duplex;
5696         u32 mac_status;
5697         bool current_link_up;
5698         int i;
5699
5700         orig_pause_cfg = tp->link_config.active_flowctrl;
5701         orig_active_speed = tp->link_config.active_speed;
5702         orig_active_duplex = tp->link_config.active_duplex;
5703
5704         if (!tg3_flag(tp, HW_AUTONEG) &&
5705             tp->link_up &&
5706             tg3_flag(tp, INIT_COMPLETE)) {
5707                 mac_status = tr32(MAC_STATUS);
5708                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5709                                MAC_STATUS_SIGNAL_DET |
5710                                MAC_STATUS_CFG_CHANGED |
5711                                MAC_STATUS_RCVD_CFG);
5712                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5713                                    MAC_STATUS_SIGNAL_DET)) {
5714                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5715                                             MAC_STATUS_CFG_CHANGED));
5716                         return 0;
5717                 }
5718         }
5719
5720         tw32_f(MAC_TX_AUTO_NEG, 0);
5721
5722         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5723         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5724         tw32_f(MAC_MODE, tp->mac_mode);
5725         udelay(40);
5726
5727         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5728                 tg3_init_bcm8002(tp);
5729
5730         /* Enable the link change event even when polling the serdes. */
5731         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5732         udelay(40);
5733
5734         current_link_up = false;
5735         tp->link_config.rmt_adv = 0;
5736         mac_status = tr32(MAC_STATUS);
5737
5738         if (tg3_flag(tp, HW_AUTONEG))
5739                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5740         else
5741                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5742
5743         tp->napi[0].hw_status->status =
5744                 (SD_STATUS_UPDATED |
5745                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5746
5747         for (i = 0; i < 100; i++) {
5748                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5749                                     MAC_STATUS_CFG_CHANGED));
5750                 udelay(5);
5751                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5752                                          MAC_STATUS_CFG_CHANGED |
5753                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5754                         break;
5755         }
5756
5757         mac_status = tr32(MAC_STATUS);
5758         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5759                 current_link_up = false;
5760                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5761                     tp->serdes_counter == 0) {
5762                         tw32_f(MAC_MODE, (tp->mac_mode |
5763                                           MAC_MODE_SEND_CONFIGS));
5764                         udelay(1);
5765                         tw32_f(MAC_MODE, tp->mac_mode);
5766                 }
5767         }
5768
5769         if (current_link_up) {
5770                 tp->link_config.active_speed = SPEED_1000;
5771                 tp->link_config.active_duplex = DUPLEX_FULL;
5772                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5773                                     LED_CTRL_LNKLED_OVERRIDE |
5774                                     LED_CTRL_1000MBPS_ON));
5775         } else {
5776                 tp->link_config.active_speed = SPEED_UNKNOWN;
5777                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5778                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5779                                     LED_CTRL_LNKLED_OVERRIDE |
5780                                     LED_CTRL_TRAFFIC_OVERRIDE));
5781         }
5782
5783         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5784                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5785                 if (orig_pause_cfg != now_pause_cfg ||
5786                     orig_active_speed != tp->link_config.active_speed ||
5787                     orig_active_duplex != tp->link_config.active_duplex)
5788                         tg3_link_report(tp);
5789         }
5790
5791         return 0;
5792 }
5793
5794 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5795 {
5796         int err = 0;
5797         u32 bmsr, bmcr;
5798         u16 current_speed = SPEED_UNKNOWN;
5799         u8 current_duplex = DUPLEX_UNKNOWN;
5800         bool current_link_up = false;
5801         u32 local_adv, remote_adv, sgsr;
5802
5803         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5804              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5805              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5806              (sgsr & SERDES_TG3_SGMII_MODE)) {
5807
5808                 if (force_reset)
5809                         tg3_phy_reset(tp);
5810
5811                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5812
5813                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5814                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5815                 } else {
5816                         current_link_up = true;
5817                         if (sgsr & SERDES_TG3_SPEED_1000) {
5818                                 current_speed = SPEED_1000;
5819                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5820                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5821                                 current_speed = SPEED_100;
5822                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5823                         } else {
5824                                 current_speed = SPEED_10;
5825                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5826                         }
5827
5828                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5829                                 current_duplex = DUPLEX_FULL;
5830                         else
5831                                 current_duplex = DUPLEX_HALF;
5832                 }
5833
5834                 tw32_f(MAC_MODE, tp->mac_mode);
5835                 udelay(40);
5836
5837                 tg3_clear_mac_status(tp);
5838
5839                 goto fiber_setup_done;
5840         }
5841
5842         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843         tw32_f(MAC_MODE, tp->mac_mode);
5844         udelay(40);
5845
5846         tg3_clear_mac_status(tp);
5847
5848         if (force_reset)
5849                 tg3_phy_reset(tp);
5850
5851         tp->link_config.rmt_adv = 0;
5852
5853         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5854         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5855         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5856                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5857                         bmsr |= BMSR_LSTATUS;
5858                 else
5859                         bmsr &= ~BMSR_LSTATUS;
5860         }
5861
5862         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5863
5864         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5865             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5866                 /* do nothing, just check for link up at the end */
5867         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5868                 u32 adv, newadv;
5869
5870                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5871                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5872                                  ADVERTISE_1000XPAUSE |
5873                                  ADVERTISE_1000XPSE_ASYM |
5874                                  ADVERTISE_SLCT);
5875
5876                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5877                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5878
5879                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5880                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5881                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5882                         tg3_writephy(tp, MII_BMCR, bmcr);
5883
5884                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5885                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5886                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5887
5888                         return err;
5889                 }
5890         } else {
5891                 u32 new_bmcr;
5892
5893                 bmcr &= ~BMCR_SPEED1000;
5894                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5895
5896                 if (tp->link_config.duplex == DUPLEX_FULL)
5897                         new_bmcr |= BMCR_FULLDPLX;
5898
5899                 if (new_bmcr != bmcr) {
5900                         /* BMCR_SPEED1000 is a reserved bit that needs
5901                          * to be set on write.
5902                          */
5903                         new_bmcr |= BMCR_SPEED1000;
5904
5905                         /* Force a linkdown */
5906                         if (tp->link_up) {
5907                                 u32 adv;
5908
5909                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5910                                 adv &= ~(ADVERTISE_1000XFULL |
5911                                          ADVERTISE_1000XHALF |
5912                                          ADVERTISE_SLCT);
5913                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5914                                 tg3_writephy(tp, MII_BMCR, bmcr |
5915                                                            BMCR_ANRESTART |
5916                                                            BMCR_ANENABLE);
5917                                 udelay(10);
5918                                 tg3_carrier_off(tp);
5919                         }
5920                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5921                         bmcr = new_bmcr;
5922                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5923                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5924                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5925                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5926                                         bmsr |= BMSR_LSTATUS;
5927                                 else
5928                                         bmsr &= ~BMSR_LSTATUS;
5929                         }
5930                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5931                 }
5932         }
5933
5934         if (bmsr & BMSR_LSTATUS) {
5935                 current_speed = SPEED_1000;
5936                 current_link_up = true;
5937                 if (bmcr & BMCR_FULLDPLX)
5938                         current_duplex = DUPLEX_FULL;
5939                 else
5940                         current_duplex = DUPLEX_HALF;
5941
5942                 local_adv = 0;
5943                 remote_adv = 0;
5944
5945                 if (bmcr & BMCR_ANENABLE) {
5946                         u32 common;
5947
5948                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5949                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5950                         common = local_adv & remote_adv;
5951                         if (common & (ADVERTISE_1000XHALF |
5952                                       ADVERTISE_1000XFULL)) {
5953                                 if (common & ADVERTISE_1000XFULL)
5954                                         current_duplex = DUPLEX_FULL;
5955                                 else
5956                                         current_duplex = DUPLEX_HALF;
5957
5958                                 tp->link_config.rmt_adv =
5959                                            mii_adv_to_ethtool_adv_x(remote_adv);
5960                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5961                                 /* Link is up via parallel detect */
5962                         } else {
5963                                 current_link_up = false;
5964                         }
5965                 }
5966         }
5967
5968 fiber_setup_done:
5969         if (current_link_up && current_duplex == DUPLEX_FULL)
5970                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5971
5972         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5973         if (tp->link_config.active_duplex == DUPLEX_HALF)
5974                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5975
5976         tw32_f(MAC_MODE, tp->mac_mode);
5977         udelay(40);
5978
5979         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5980
5981         tp->link_config.active_speed = current_speed;
5982         tp->link_config.active_duplex = current_duplex;
5983
5984         tg3_test_and_report_link_chg(tp, current_link_up);
5985         return err;
5986 }
5987
5988 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5989 {
5990         if (tp->serdes_counter) {
5991                 /* Give autoneg time to complete. */
5992                 tp->serdes_counter--;
5993                 return;
5994         }
5995
5996         if (!tp->link_up &&
5997             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5998                 u32 bmcr;
5999
6000                 tg3_readphy(tp, MII_BMCR, &bmcr);
6001                 if (bmcr & BMCR_ANENABLE) {
6002                         u32 phy1, phy2;
6003
6004                         /* Select shadow register 0x1f */
6005                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6006                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6007
6008                         /* Select expansion interrupt status register */
6009                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6010                                          MII_TG3_DSP_EXP1_INT_STAT);
6011                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6012                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6013
6014                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6015                                 /* We have signal detect and not receiving
6016                                 /* We have signal detect and are not
6017                                  * receiving config code words; the link
6018                                  * is up via parallel detection.
6019                                  */
6020                                 bmcr &= ~BMCR_ANENABLE;
6021                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6022                                 tg3_writephy(tp, MII_BMCR, bmcr);
6023                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6024                         }
6025                 }
6026         } else if (tp->link_up &&
6027                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6028                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6029                 u32 phy2;
6030
6031                 /* Select expansion interrupt status register */
6032                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6033                                  MII_TG3_DSP_EXP1_INT_STAT);
6034                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6035                 if (phy2 & 0x20) {
6036                         u32 bmcr;
6037
6038                         /* Config code words received, turn on autoneg. */
6039                         tg3_readphy(tp, MII_BMCR, &bmcr);
6040                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6041
6042                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6043
6044                 }
6045         }
6046 }
6047
6048 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6049 {
6050         u32 val;
6051         int err;
6052
6053         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6054                 err = tg3_setup_fiber_phy(tp, force_reset);
6055         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6056                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6057         else
6058                 err = tg3_setup_copper_phy(tp, force_reset);
6059
6060         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6061                 u32 scale;
6062
6063                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6064                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6065                         scale = 65;
6066                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6067                         scale = 6;
6068                 else
6069                         scale = 12;
6070
6071                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6072                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6073                 tw32(GRC_MISC_CFG, val);
6074         }
6075
6076         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6077               (6 << TX_LENGTHS_IPG_SHIFT);
6078         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6079             tg3_asic_rev(tp) == ASIC_REV_5762)
6080                 val |= tr32(MAC_TX_LENGTHS) &
6081                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6082                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6083
6084         if (tp->link_config.active_speed == SPEED_1000 &&
6085             tp->link_config.active_duplex == DUPLEX_HALF)
6086                 tw32(MAC_TX_LENGTHS, val |
6087                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6088         else
6089                 tw32(MAC_TX_LENGTHS, val |
6090                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6091
6092         if (!tg3_flag(tp, 5705_PLUS)) {
6093                 if (tp->link_up) {
6094                         tw32(HOSTCC_STAT_COAL_TICKS,
6095                              tp->coal.stats_block_coalesce_usecs);
6096                 } else {
6097                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6098                 }
6099         }
6100
6101         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6102                 val = tr32(PCIE_PWR_MGMT_THRESH);
6103                 if (!tp->link_up)
6104                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6105                               tp->pwrmgmt_thresh;
6106                 else
6107                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6108                 tw32(PCIE_PWR_MGMT_THRESH, val);
6109         }
6110
6111         return err;
6112 }
6113
6114 /* tp->lock must be held */
6115 static u64 tg3_refclk_read(struct tg3 *tp)
6116 {
6117         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6118         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6119 }
6120
6121 /* tp->lock must be held */
6122 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6123 {
6124         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6125
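        /* Stop the clock while the two 32-bit halves are written, so the
         * counter cannot advance between the LSB and MSB updates, then
         * let it run again.
         */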
6126         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6127         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6128         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6129         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6130 }
6131
6132 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6133 static inline void tg3_full_unlock(struct tg3 *tp);
6134 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6135 {
6136         struct tg3 *tp = netdev_priv(dev);
6137
6138         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6139                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6140                                 SOF_TIMESTAMPING_SOFTWARE;
6141
6142         if (tg3_flag(tp, PTP_CAPABLE)) {
6143                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6144                                         SOF_TIMESTAMPING_RX_HARDWARE |
6145                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6146         }
6147
6148         if (tp->ptp_clock)
6149                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6150         else
6151                 info->phc_index = -1;
6152
6153         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6154
6155         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6156                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6157                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6158                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6159         return 0;
6160 }
6161
6162 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6163 {
6164         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6165         bool neg_adj = false;
6166         u32 correction = 0;
6167
6168         if (ppb < 0) {
6169                 neg_adj = true;
6170                 ppb = -ppb;
6171         }
6172
6173         /* Frequency adjustment is performed using hardware with a 24-bit
6174          * accumulator and a programmable correction value. On each clock
6175          * cycle, the correction value is added to the accumulator and, when
6176          * it overflows, the time counter is incremented/decremented.
6177          *
6178          * So conversion from ppb to correction value is
6179          *              ppb * (1 << 24) / 1000000000
6180          */
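        /* For example, ppb = 1000 gives
         * correction = (1000 * 16777216) / 1000000000 = 16 (truncated).
         */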
6181         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6182                      TG3_EAV_REF_CLK_CORRECT_MASK;
6183
6184         tg3_full_lock(tp, 0);
6185
6186         if (correction)
6187                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6188                      TG3_EAV_REF_CLK_CORRECT_EN |
6189                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6190         else
6191                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6192
6193         tg3_full_unlock(tp);
6194
6195         return 0;
6196 }
6197
6198 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6199 {
6200         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6201
6202         tg3_full_lock(tp, 0);
6203         tp->ptp_adjust += delta;
6204         tg3_full_unlock(tp);
6205
6206         return 0;
6207 }
6208
6209 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6210 {
6211         u64 ns;
6212         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213
6214         tg3_full_lock(tp, 0);
6215         ns = tg3_refclk_read(tp);
6216         ns += tp->ptp_adjust;
6217         tg3_full_unlock(tp);
6218
6219         *ts = ns_to_timespec64(ns);
6220
6221         return 0;
6222 }
6223
6224 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6225                            const struct timespec64 *ts)
6226 {
6227         u64 ns;
6228         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6229
6230         ns = timespec64_to_ns(ts);
6231
6232         tg3_full_lock(tp, 0);
6233         tg3_refclk_write(tp, ns);
6234         tp->ptp_adjust = 0;
6235         tg3_full_unlock(tp);
6236
6237         return 0;
6238 }
6239
6240 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6241                           struct ptp_clock_request *rq, int on)
6242 {
6243         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244         u32 clock_ctl;
6245         int rval = 0;
6246
6247         switch (rq->type) {
6248         case PTP_CLK_REQ_PEROUT:
6249                 if (rq->perout.index != 0)
6250                         return -EINVAL;
6251
6252                 tg3_full_lock(tp, 0);
6253                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6254                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6255
6256                 if (on) {
6257                         u64 nsec;
6258
6259                         nsec = rq->perout.start.sec * 1000000000ULL +
6260                                rq->perout.start.nsec;
6261
6262                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6263                                 netdev_warn(tp->dev,
6264                                             "Device supports only a one-shot timesync output, period must be 0\n");
6265                                 rval = -EINVAL;
6266                                 goto err_out;
6267                         }
6268
6269                         if (nsec & (1ULL << 63)) {
6270                                 netdev_warn(tp->dev,
6271                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6272                                 rval = -EINVAL;
6273                                 goto err_out;
6274                         }
6275
6276                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6277                         tw32(TG3_EAV_WATCHDOG0_MSB,
6278                              TG3_EAV_WATCHDOG0_EN |
6279                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6280
6281                         tw32(TG3_EAV_REF_CLCK_CTL,
6282                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6283                 } else {
6284                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6285                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6286                 }
6287
6288 err_out:
6289                 tg3_full_unlock(tp);
6290                 return rval;
6291
6292         default:
6293                 break;
6294         }
6295
6296         return -EOPNOTSUPP;
6297 }
6298
6299 static const struct ptp_clock_info tg3_ptp_caps = {
6300         .owner          = THIS_MODULE,
6301         .name           = "tg3 clock",
6302         .max_adj        = 250000000,
6303         .n_alarm        = 0,
6304         .n_ext_ts       = 0,
6305         .n_per_out      = 1,
6306         .n_pins         = 0,
6307         .pps            = 0,
6308         .adjfreq        = tg3_ptp_adjfreq,
6309         .adjtime        = tg3_ptp_adjtime,
6310         .gettime64      = tg3_ptp_gettime,
6311         .settime64      = tg3_ptp_settime,
6312         .enable         = tg3_ptp_enable,
6313 };
6314
6315 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6316                                      struct skb_shared_hwtstamps *timestamp)
6317 {
6318         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6319         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6320                                            tp->ptp_adjust);
6321 }
6322
6323 /* tp->lock must be held */
6324 static void tg3_ptp_init(struct tg3 *tp)
6325 {
6326         if (!tg3_flag(tp, PTP_CAPABLE))
6327                 return;
6328
6329         /* Initialize the hardware clock to the system time. */
6330         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6331         tp->ptp_adjust = 0;
6332         tp->ptp_info = tg3_ptp_caps;
6333 }
6334
6335 /* tp->lock must be held */
6336 static void tg3_ptp_resume(struct tg3 *tp)
6337 {
6338         if (!tg3_flag(tp, PTP_CAPABLE))
6339                 return;
6340
6341         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6342         tp->ptp_adjust = 0;
6343 }
6344
6345 static void tg3_ptp_fini(struct tg3 *tp)
6346 {
6347         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6348                 return;
6349
6350         ptp_clock_unregister(tp->ptp_clock);
6351         tp->ptp_clock = NULL;
6352         tp->ptp_adjust = 0;
6353 }
6354
6355 static inline int tg3_irq_sync(struct tg3 *tp)
6356 {
6357         return tp->irq_sync;
6358 }
6359
6360 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6361 {
6362         int i;
6363
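        /* Offset the destination pointer so each register lands at the
         * same offset in the dump buffer as it has in the register
         * space; a dump entry can then be matched to its register
         * address directly.
         */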
6364         dst = (u32 *)((u8 *)dst + off);
6365         for (i = 0; i < len; i += sizeof(u32))
6366                 *dst++ = tr32(off + i);
6367 }
6368
6369 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6370 {
6371         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6372         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6373         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6374         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6375         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6376         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6377         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6378         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6379         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6380         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6381         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6382         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6383         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6384         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6385         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6386         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6387         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6388         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6389         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6390
6391         if (tg3_flag(tp, SUPPORT_MSIX))
6392                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6393
6394         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6395         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6396         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6397         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6398         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6399         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6400         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6401         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6402
6403         if (!tg3_flag(tp, 5705_PLUS)) {
6404                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6405                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6406                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6407         }
6408
6409         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6410         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6411         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6412         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6413         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6414
6415         if (tg3_flag(tp, NVRAM))
6416                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6417 }
6418
6419 static void tg3_dump_state(struct tg3 *tp)
6420 {
6421         int i;
6422         u32 *regs;
6423
6424         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6425         if (!regs)
6426                 return;
6427
6428         if (tg3_flag(tp, PCI_EXPRESS)) {
6429                 /* Read up to but not including private PCI registers */
6430                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6431                         regs[i / sizeof(u32)] = tr32(i);
6432         } else
6433                 tg3_dump_legacy_regs(tp, regs);
6434
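        /* Print four registers per line, skipping groups that read
         * back as all zeros.
         */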
6435         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6436                 if (!regs[i + 0] && !regs[i + 1] &&
6437                     !regs[i + 2] && !regs[i + 3])
6438                         continue;
6439
6440                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6441                            i * 4,
6442                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6443         }
6444
6445         kfree(regs);
6446
6447         for (i = 0; i < tp->irq_cnt; i++) {
6448                 struct tg3_napi *tnapi = &tp->napi[i];
6449
6450                 /* SW status block */
6451                 netdev_err(tp->dev,
6452                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6453                            i,
6454                            tnapi->hw_status->status,
6455                            tnapi->hw_status->status_tag,
6456                            tnapi->hw_status->rx_jumbo_consumer,
6457                            tnapi->hw_status->rx_consumer,
6458                            tnapi->hw_status->rx_mini_consumer,
6459                            tnapi->hw_status->idx[0].rx_producer,
6460                            tnapi->hw_status->idx[0].tx_consumer);
6461
6462                 netdev_err(tp->dev,
6463                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6464                            i,
6465                            tnapi->last_tag, tnapi->last_irq_tag,
6466                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6467                            tnapi->rx_rcb_ptr,
6468                            tnapi->prodring.rx_std_prod_idx,
6469                            tnapi->prodring.rx_std_cons_idx,
6470                            tnapi->prodring.rx_jmb_prod_idx,
6471                            tnapi->prodring.rx_jmb_cons_idx);
6472         }
6473 }
6474
6475 /* This is called whenever we suspect that the system chipset is re-
6476  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6477  * is bogus tx completions. We try to recover by setting the
6478  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6479  * in the workqueue.
6480  */
6481 static void tg3_tx_recover(struct tg3 *tp)
6482 {
6483         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6484                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6485
6486         netdev_warn(tp->dev,
6487                     "The system may be re-ordering memory-mapped I/O "
6488                     "cycles to the network device, attempting to recover. "
6489                     "Please report the problem to the driver maintainer "
6490                     "and include system chipset information.\n");
6491
6492         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6493 }
6494
6495 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6496 {
6497         /* Tell compiler to fetch tx indices from memory. */
6498         barrier();
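        /* tx_prod - tx_cons, masked to the ring size, is the number of
         * descriptors in flight.  For example, on a 512-entry ring
         * (TG3_TX_RING_SIZE) with tx_prod = 10 and tx_cons = 508:
         * (10 - 508) & 511 = 14 in flight, leaving tx_pending - 14
         * slots available.
         */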
6499         return tnapi->tx_pending -
6500                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6501 }
6502
6503 /* Tigon3 never reports partial packet sends.  So we do not
6504  * need special logic to handle SKBs that have not had all
6505  * of their frags sent yet, like SunGEM does.
6506  */
6507 static void tg3_tx(struct tg3_napi *tnapi)
6508 {
6509         struct tg3 *tp = tnapi->tp;
6510         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6511         u32 sw_idx = tnapi->tx_cons;
6512         struct netdev_queue *txq;
6513         int index = tnapi - tp->napi;
6514         unsigned int pkts_compl = 0, bytes_compl = 0;
6515
6516         if (tg3_flag(tp, ENABLE_TSS))
6517                 index--;
6518
6519         txq = netdev_get_tx_queue(tp->dev, index);
6520
6521         while (sw_idx != hw_idx) {
6522                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6523                 struct sk_buff *skb = ri->skb;
6524                 int i, tx_bug = 0;
6525
6526                 if (unlikely(skb == NULL)) {
6527                         tg3_tx_recover(tp);
6528                         return;
6529                 }
6530
6531                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6532                         struct skb_shared_hwtstamps timestamp;
6533                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6534                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6535
6536                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6537
6538                         skb_tstamp_tx(skb, &timestamp);
6539                 }
6540
6541                 pci_unmap_single(tp->pdev,
6542                                  dma_unmap_addr(ri, mapping),
6543                                  skb_headlen(skb),
6544                                  PCI_DMA_TODEVICE);
6545
6546                 ri->skb = NULL;
6547
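                /* Skip any extra descriptor slots that were marked
                 * fragmented when the xmit path had to split this
                 * buffer's DMA mapping (see tg3_tx_frag_set()).
                 */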
6548                 while (ri->fragmented) {
6549                         ri->fragmented = false;
6550                         sw_idx = NEXT_TX(sw_idx);
6551                         ri = &tnapi->tx_buffers[sw_idx];
6552                 }
6553
6554                 sw_idx = NEXT_TX(sw_idx);
6555
6556                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6557                         ri = &tnapi->tx_buffers[sw_idx];
6558                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6559                                 tx_bug = 1;
6560
6561                         pci_unmap_page(tp->pdev,
6562                                        dma_unmap_addr(ri, mapping),
6563                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6564                                        PCI_DMA_TODEVICE);
6565
6566                         while (ri->fragmented) {
6567                                 ri->fragmented = false;
6568                                 sw_idx = NEXT_TX(sw_idx);
6569                                 ri = &tnapi->tx_buffers[sw_idx];
6570                         }
6571
6572                         sw_idx = NEXT_TX(sw_idx);
6573                 }
6574
6575                 pkts_compl++;
6576                 bytes_compl += skb->len;
6577
6578                 dev_kfree_skb_any(skb);
6579
6580                 if (unlikely(tx_bug)) {
6581                         tg3_tx_recover(tp);
6582                         return;
6583                 }
6584         }
6585
6586         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6587
6588         tnapi->tx_cons = sw_idx;
6589
6590         /* Need to make the tx_cons update visible to tg3_start_xmit()
6591          * before checking for netif_queue_stopped().  Without the
6592          * memory barrier, there is a small possibility that tg3_start_xmit()
6593          * will miss it and cause the queue to be stopped forever.
6594          */
6595         smp_mb();
6596
6597         if (unlikely(netif_tx_queue_stopped(txq) &&
6598                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6599                 __netif_tx_lock(txq, smp_processor_id());
6600                 if (netif_tx_queue_stopped(txq) &&
6601                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6602                         netif_tx_wake_queue(txq);
6603                 __netif_tx_unlock(txq);
6604         }
6605 }
6606
6607 static void tg3_frag_free(bool is_frag, void *data)
6608 {
6609         if (is_frag)
6610                 skb_free_frag(data);
6611         else
6612                 kfree(data);
6613 }
6614
6615 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6616 {
6617         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6618                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6619
6620         if (!ri->data)
6621                 return;
6622
6623         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6624                          map_sz, PCI_DMA_FROMDEVICE);
6625         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6626         ri->data = NULL;
6627 }
6628
6629
6630 /* Returns the size of the rx data buffer allocated, or < 0 on error.
6631  *
6632  * We only need to fill in the address because the other members
6633  * of the RX descriptor are invariant, see tg3_init_rings.
6634  *
6635  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6636  * posting buffers we only dirty the first cache line of the RX
6637  * descriptor (containing the address).  Whereas for the RX status
6638  * buffers the cpu only reads the last cacheline of the RX descriptor
6639  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6640  */
6641 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6642                              u32 opaque_key, u32 dest_idx_unmasked,
6643                              unsigned int *frag_size)
6644 {
6645         struct tg3_rx_buffer_desc *desc;
6646         struct ring_info *map;
6647         u8 *data;
6648         dma_addr_t mapping;
6649         int skb_size, data_size, dest_idx;
6650
6651         switch (opaque_key) {
6652         case RXD_OPAQUE_RING_STD:
6653                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6654                 desc = &tpr->rx_std[dest_idx];
6655                 map = &tpr->rx_std_buffers[dest_idx];
6656                 data_size = tp->rx_pkt_map_sz;
6657                 break;
6658
6659         case RXD_OPAQUE_RING_JUMBO:
6660                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6661                 desc = &tpr->rx_jmb[dest_idx].std;
6662                 map = &tpr->rx_jmb_buffers[dest_idx];
6663                 data_size = TG3_RX_JMB_MAP_SZ;
6664                 break;
6665
6666         default:
6667                 return -EINVAL;
6668         }
6669
6670         /* Do not overwrite any of the map or ring descriptor information
6671          * until we are sure we can commit to a new buffer.
6672          *
6673          * Callers depend upon this behavior and assume that
6674          * we leave everything unchanged if we fail.
6675          */
6676         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6677                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6678         if (skb_size <= PAGE_SIZE) {
6679                 data = netdev_alloc_frag(skb_size);
6680                 *frag_size = skb_size;
6681         } else {
6682                 data = kmalloc(skb_size, GFP_ATOMIC);
6683                 *frag_size = 0;
6684         }
6685         if (!data)
6686                 return -ENOMEM;
6687
6688         mapping = pci_map_single(tp->pdev,
6689                                  data + TG3_RX_OFFSET(tp),
6690                                  data_size,
6691                                  PCI_DMA_FROMDEVICE);
6692         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6693                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6694                 return -EIO;
6695         }
6696
6697         map->data = data;
6698         dma_unmap_addr_set(map, mapping, mapping);
6699
6700         desc->addr_hi = ((u64)mapping >> 32);
6701         desc->addr_lo = ((u64)mapping & 0xffffffff);
6702
6703         return data_size;
6704 }
6705
6706 /* We only need to copy over the address because the other
6707  * members of the RX descriptor are invariant.  See notes above
6708  * tg3_alloc_rx_data for full details.
6709  */
6710 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6711                            struct tg3_rx_prodring_set *dpr,
6712                            u32 opaque_key, int src_idx,
6713                            u32 dest_idx_unmasked)
6714 {
6715         struct tg3 *tp = tnapi->tp;
6716         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6717         struct ring_info *src_map, *dest_map;
6718         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6719         int dest_idx;
6720
6721         switch (opaque_key) {
6722         case RXD_OPAQUE_RING_STD:
6723                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6724                 dest_desc = &dpr->rx_std[dest_idx];
6725                 dest_map = &dpr->rx_std_buffers[dest_idx];
6726                 src_desc = &spr->rx_std[src_idx];
6727                 src_map = &spr->rx_std_buffers[src_idx];
6728                 break;
6729
6730         case RXD_OPAQUE_RING_JUMBO:
6731                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6732                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6733                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6734                 src_desc = &spr->rx_jmb[src_idx].std;
6735                 src_map = &spr->rx_jmb_buffers[src_idx];
6736                 break;
6737
6738         default:
6739                 return;
6740         }
6741
6742         dest_map->data = src_map->data;
6743         dma_unmap_addr_set(dest_map, mapping,
6744                            dma_unmap_addr(src_map, mapping));
6745         dest_desc->addr_hi = src_desc->addr_hi;
6746         dest_desc->addr_lo = src_desc->addr_lo;
6747
6748         /* Ensure that the update to the data pointer happens after the
6749          * physical addresses have been transferred to the new BD location.
6750          */
6751         smp_wmb();
6752
6753         src_map->data = NULL;
6754 }
6755
6756 /* The RX ring scheme is composed of multiple rings which post fresh
6757  * buffers to the chip, and one special ring the chip uses to report
6758  * status back to the host.
6759  *
6760  * The special ring reports the status of received packets to the
6761  * host.  The chip does not write into the original descriptor the
6762  * RX buffer was obtained from.  The chip simply takes the original
6763  * descriptor as provided by the host, updates the status and length
6764  * field, then writes this into the next status ring entry.
6765  *
6766  * Each ring the host uses to post buffers to the chip is described
6767  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6768  * it is first placed into on-chip RAM.  When the packet's length is
6769  * known, the chip walks down the TG3_BDINFO entries to select the ring:
6770  * each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6771  * whose MAXLEN covers the new packet's length is chosen.
6772  *
6773  * The "separate ring for rx status" scheme may sound odd, but it makes
6774  * sense from a cache coherency perspective.  If only the host writes
6775  * to the buffer post rings, and only the chip writes to the rx status
6776  * rings, then cache lines never move beyond shared-modified state.
6777  * If both the host and chip were to write into the same ring, cache line
6778  * eviction could occur since both entities want it in an exclusive state.
6779  */
6780 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6781 {
6782         struct tg3 *tp = tnapi->tp;
6783         u32 work_mask, rx_std_posted = 0;
6784         u32 std_prod_idx, jmb_prod_idx;
6785         u32 sw_idx = tnapi->rx_rcb_ptr;
6786         u16 hw_idx;
6787         int received;
6788         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6789
6790         hw_idx = *(tnapi->rx_rcb_prod_idx);
6791         /*
6792          * We need to order the read of hw_idx and the read of
6793          * the opaque cookie.
6794          */
6795         rmb();
6796         work_mask = 0;
6797         received = 0;
6798         std_prod_idx = tpr->rx_std_prod_idx;
6799         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6800         while (sw_idx != hw_idx && budget > 0) {
6801                 struct ring_info *ri;
6802                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6803                 unsigned int len;
6804                 struct sk_buff *skb;
6805                 dma_addr_t dma_addr;
6806                 u32 opaque_key, desc_idx, *post_ptr;
6807                 u8 *data;
6808                 u64 tstamp = 0;
6809
6810                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6811                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6812                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6813                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6814                         dma_addr = dma_unmap_addr(ri, mapping);
6815                         data = ri->data;
6816                         post_ptr = &std_prod_idx;
6817                         rx_std_posted++;
6818                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6819                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6820                         dma_addr = dma_unmap_addr(ri, mapping);
6821                         data = ri->data;
6822                         post_ptr = &jmb_prod_idx;
6823                 } else
6824                         goto next_pkt_nopost;
6825
6826                 work_mask |= opaque_key;
6827
6828                 if (desc->err_vlan & RXD_ERR_MASK) {
6829                 drop_it:
6830                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6831                                        desc_idx, *post_ptr);
6832                 drop_it_no_recycle:
6833                         /* Other statistics are tracked by the hardware. */
6834                         tp->rx_dropped++;
6835                         goto next_pkt;
6836                 }
6837
6838                 prefetch(data + TG3_RX_OFFSET(tp));
6839                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6840                       ETH_FCS_LEN;
6841
6842                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6843                      RXD_FLAG_PTPSTAT_PTPV1 ||
6844                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6845                      RXD_FLAG_PTPSTAT_PTPV2) {
6846                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6847                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6848                 }
6849
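                /* Above the copy threshold, hand the existing buffer to
                 * the stack with build_skb() and post a fresh buffer in
                 * its place; below it, copy the packet into a small skb
                 * and recycle the original buffer.
                 */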
6850                 if (len > TG3_RX_COPY_THRESH(tp)) {
6851                         int skb_size;
6852                         unsigned int frag_size;
6853
6854                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6855                                                     *post_ptr, &frag_size);
6856                         if (skb_size < 0)
6857                                 goto drop_it;
6858
6859                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6860                                          PCI_DMA_FROMDEVICE);
6861
6862                         /* Ensure that the update to the data happens
6863                          * after the usage of the old DMA mapping.
6864                          */
6865                         smp_wmb();
6866
6867                         ri->data = NULL;
6868
6869                         skb = build_skb(data, frag_size);
6870                         if (!skb) {
6871                                 tg3_frag_free(frag_size != 0, data);
6872                                 goto drop_it_no_recycle;
6873                         }
6874                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6875                 } else {
6876                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6877                                        desc_idx, *post_ptr);
6878
6879                         skb = netdev_alloc_skb(tp->dev,
6880                                                len + TG3_RAW_IP_ALIGN);
6881                         if (skb == NULL)
6882                                 goto drop_it_no_recycle;
6883
6884                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6885                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6886                         memcpy(skb->data,
6887                                data + TG3_RX_OFFSET(tp),
6888                                len);
6889                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6890                 }
6891
6892                 skb_put(skb, len);
6893                 if (tstamp)
6894                         tg3_hwclock_to_timestamp(tp, tstamp,
6895                                                  skb_hwtstamps(skb));
6896
6897                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6898                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6899                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6900                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6901                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6902                 else
6903                         skb_checksum_none_assert(skb);
6904
6905                 skb->protocol = eth_type_trans(skb, tp->dev);
6906
6907                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6908                     skb->protocol != htons(ETH_P_8021Q) &&
6909                     skb->protocol != htons(ETH_P_8021AD)) {
6910                         dev_kfree_skb_any(skb);
6911                         goto drop_it_no_recycle;
6912                 }
6913
6914                 if (desc->type_flags & RXD_FLAG_VLAN &&
6915                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6916                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6917                                                desc->err_vlan & RXD_VLAN_MASK);
6918
6919                 napi_gro_receive(&tnapi->napi, skb);
6920
6921                 received++;
6922                 budget--;
6923
6924 next_pkt:
6925                 (*post_ptr)++;
6926
6927                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6928                         tpr->rx_std_prod_idx = std_prod_idx &
6929                                                tp->rx_std_ring_mask;
6930                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6931                                      tpr->rx_std_prod_idx);
6932                         work_mask &= ~RXD_OPAQUE_RING_STD;
6933                         rx_std_posted = 0;
6934                 }
6935 next_pkt_nopost:
6936                 sw_idx++;
6937                 sw_idx &= tp->rx_ret_ring_mask;
6938
6939                 /* Refresh hw_idx to see if there is new work */
6940                 if (sw_idx == hw_idx) {
6941                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6942                         rmb();
6943                 }
6944         }
6945
6946         /* ACK the status ring. */
6947         tnapi->rx_rcb_ptr = sw_idx;
6948         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6949
6950         /* Refill RX ring(s). */
6951         if (!tg3_flag(tp, ENABLE_RSS)) {
6952                 /* Sync BD data before updating mailbox */
6953                 wmb();
6954
6955                 if (work_mask & RXD_OPAQUE_RING_STD) {
6956                         tpr->rx_std_prod_idx = std_prod_idx &
6957                                                tp->rx_std_ring_mask;
6958                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6959                                      tpr->rx_std_prod_idx);
6960                 }
6961                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6962                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6963                                                tp->rx_jmb_ring_mask;
6964                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6965                                      tpr->rx_jmb_prod_idx);
6966                 }
6967                 mmiowb();
6968         } else if (work_mask) {
6969                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6970                  * updated before the producer indices can be updated.
6971                  */
6972                 smp_wmb();
6973
6974                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6975                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6976
6977                 if (tnapi != &tp->napi[1]) {
6978                         tp->rx_refill = true;
6979                         napi_schedule(&tp->napi[1].napi);
6980                 }
6981         }
6982
6983         return received;
6984 }
6985
6986 static void tg3_poll_link(struct tg3 *tp)
6987 {
6988         /* handle link change and other phy events */
6989         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6990                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6991
6992                 if (sblk->status & SD_STATUS_LINK_CHG) {
6993                         sblk->status = SD_STATUS_UPDATED |
6994                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6995                         spin_lock(&tp->lock);
6996                         if (tg3_flag(tp, USE_PHYLIB)) {
6997                                 tw32_f(MAC_STATUS,
6998                                      (MAC_STATUS_SYNC_CHANGED |
6999                                       MAC_STATUS_CFG_CHANGED |
7000                                       MAC_STATUS_MI_COMPLETION |
7001                                       MAC_STATUS_LNKSTATE_CHANGED));
7002                                 udelay(40);
7003                         } else
7004                                 tg3_setup_phy(tp, false);
7005                         spin_unlock(&tp->lock);
7006                 }
7007         }
7008 }
7009
7010 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7011                                 struct tg3_rx_prodring_set *dpr,
7012                                 struct tg3_rx_prodring_set *spr)
7013 {
7014         u32 si, di, cpycnt, src_prod_idx;
7015         int i, err = 0;
7016
7017         while (1) {
7018                 src_prod_idx = spr->rx_std_prod_idx;
7019
7020                 /* Make sure updates to the rx_std_buffers[] entries and the
7021                  * standard producer index are seen in the correct order.
7022                  */
7023                 smp_rmb();
7024
7025                 if (spr->rx_std_cons_idx == src_prod_idx)
7026                         break;
7027
7028                 if (spr->rx_std_cons_idx < src_prod_idx)
7029                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7030                 else
7031                         cpycnt = tp->rx_std_ring_mask + 1 -
7032                                  spr->rx_std_cons_idx;
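                /* When the producer index has wrapped past the consumer,
                 * copy only up to the end of the ring here; the outer
                 * loop picks up the remainder.  E.g. on a 512-entry ring
                 * with cons = 500 and prod = 10: 12 entries on this pass,
                 * then 10 more on the next.
                 */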
7033
7034                 cpycnt = min(cpycnt,
7035                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7036
7037                 si = spr->rx_std_cons_idx;
7038                 di = dpr->rx_std_prod_idx;
7039
7040                 for (i = di; i < di + cpycnt; i++) {
7041                         if (dpr->rx_std_buffers[i].data) {
7042                                 cpycnt = i - di;
7043                                 err = -ENOSPC;
7044                                 break;
7045                         }
7046                 }
7047
7048                 if (!cpycnt)
7049                         break;
7050
7051                 /* Ensure that updates to the rx_std_buffers ring and the
7052                  * shadowed hardware producer ring from tg3_recycle_rx() are
7053                  * ordered correctly WRT the buffer check above.
7054                  */
7055                 smp_rmb();
7056
7057                 memcpy(&dpr->rx_std_buffers[di],
7058                        &spr->rx_std_buffers[si],
7059                        cpycnt * sizeof(struct ring_info));
7060
7061                 for (i = 0; i < cpycnt; i++, di++, si++) {
7062                         struct tg3_rx_buffer_desc *sbd, *dbd;
7063                         sbd = &spr->rx_std[si];
7064                         dbd = &dpr->rx_std[di];
7065                         dbd->addr_hi = sbd->addr_hi;
7066                         dbd->addr_lo = sbd->addr_lo;
7067                 }
7068
7069                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7070                                        tp->rx_std_ring_mask;
7071                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7072                                        tp->rx_std_ring_mask;
7073         }
7074
7075         while (1) {
7076                 src_prod_idx = spr->rx_jmb_prod_idx;
7077
7078                 /* Make sure updates to the rx_jmb_buffers[] entries and
7079                  * the jumbo producer index are seen in the correct order.
7080                  */
7081                 smp_rmb();
7082
7083                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7084                         break;
7085
7086                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7087                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7088                 else
7089                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7090                                  spr->rx_jmb_cons_idx;
7091
7092                 cpycnt = min(cpycnt,
7093                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7094
7095                 si = spr->rx_jmb_cons_idx;
7096                 di = dpr->rx_jmb_prod_idx;
7097
7098                 for (i = di; i < di + cpycnt; i++) {
7099                         if (dpr->rx_jmb_buffers[i].data) {
7100                                 cpycnt = i - di;
7101                                 err = -ENOSPC;
7102                                 break;
7103                         }
7104                 }
7105
7106                 if (!cpycnt)
7107                         break;
7108
7109                 /* Ensure that updates to the rx_jmb_buffers ring and the
7110                  * shadowed hardware producer ring from tg3_recycle_skb() are
7111                  * ordered correctly WRT the skb check above.
7112                  */
7113                 smp_rmb();
7114
7115                 memcpy(&dpr->rx_jmb_buffers[di],
7116                        &spr->rx_jmb_buffers[si],
7117                        cpycnt * sizeof(struct ring_info));
7118
7119                 for (i = 0; i < cpycnt; i++, di++, si++) {
7120                         struct tg3_rx_buffer_desc *sbd, *dbd;
7121                         sbd = &spr->rx_jmb[si].std;
7122                         dbd = &dpr->rx_jmb[di].std;
7123                         dbd->addr_hi = sbd->addr_hi;
7124                         dbd->addr_lo = sbd->addr_lo;
7125                 }
7126
7127                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7128                                        tp->rx_jmb_ring_mask;
7129                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7130                                        tp->rx_jmb_ring_mask;
7131         }
7132
7133         return err;
7134 }
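
/* Worked example of the copy loops above (hypothetical values): with a
 * 512-entry standard ring (tp->rx_std_ring_mask == 511), a source
 * consumer index of 500 and a source producer index of 10, the first
 * pass copies 512 - 500 = 12 entries (500..511), the masked update
 * then wraps the consumer index to 0, and a second pass copies the
 * remaining 10 entries (0..9).  The min() bounds each pass the same
 * way by the room left before the destination producer index wraps.
 */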
7135
7136 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7137 {
7138         struct tg3 *tp = tnapi->tp;
7139
7140         /* run TX completion thread */
7141         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7142                 tg3_tx(tnapi);
7143                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7144                         return work_done;
7145         }
7146
7147         if (!tnapi->rx_rcb_prod_idx)
7148                 return work_done;
7149
7150         /* run RX thread, within the bounds set by NAPI.
7151          * All RX "locking" is done by ensuring outside
7152          * code synchronizes with tg3->napi.poll()
7153          */
7154         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7155                 work_done += tg3_rx(tnapi, budget - work_done);
7156
7157         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7158                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7159                 int i, err = 0;
7160                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7161                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7162
7163                 tp->rx_refill = false;
7164                 for (i = 1; i <= tp->rxq_cnt; i++)
7165                         err |= tg3_rx_prodring_xfer(tp, dpr,
7166                                                     &tp->napi[i].prodring);
7167
7168                 wmb();
7169
7170                 if (std_prod_idx != dpr->rx_std_prod_idx)
7171                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7172                                      dpr->rx_std_prod_idx);
7173
7174                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7175                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7176                                      dpr->rx_jmb_prod_idx);
7177
7178                 mmiowb();
7179
7180                 if (err)
7181                         tw32_f(HOSTCC_MODE, tp->coal_now);
7182         }
7183
7184         return work_done;
7185 }
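
/* Note on the ENABLE_RSS branch above: only tp->napi[1] runs the
 * producer-ring transfer, funnelling recycled buffers from every RX
 * vector's prodring back into tp->napi[0]'s, the only producer ring
 * the hardware consumes buffers from.  The wmb() publishes the copied
 * descriptors before the mailbox writes expose the new producer
 * indices, and a non-zero err (-ENOSPC from tg3_rx_prodring_xfer())
 * kicks the coalescing engine (tp->coal_now) so this poll runs again
 * soon and can retry the transfer.
 */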
7186
7187 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7188 {
7189         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7190                 schedule_work(&tp->reset_task);
7191 }
7192
7193 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7194 {
7195         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7196                 cancel_work_sync(&tp->reset_task);
7197         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7198 }
7199
7200 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7201 {
7202         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7203         struct tg3 *tp = tnapi->tp;
7204         int work_done = 0;
7205         struct tg3_hw_status *sblk = tnapi->hw_status;
7206
7207         while (1) {
7208                 work_done = tg3_poll_work(tnapi, work_done, budget);
7209
7210                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7211                         goto tx_recovery;
7212
7213                 if (unlikely(work_done >= budget))
7214                         break;
7215
7216                 /* tp->last_tag is used in tg3_int_reenable() below
7217                  * to tell the hw how much work has been processed,
7218                  * so we must read it before checking for more work.
7219                  */
7220                 tnapi->last_tag = sblk->status_tag;
7221                 tnapi->last_irq_tag = tnapi->last_tag;
7222                 rmb();
7223
7224                 /* check for RX/TX work to do */
7225                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7226                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7227
7228                         /* This test is not race free, but looping
7229                          * again reduces the number of interrupts.
7230                          */
7231                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7232                                 continue;
7233
7234                         napi_complete_done(napi, work_done);
7235                         /* Reenable interrupts. */
7236                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7237
7238                         /* This test is synchronized with napi_schedule()
7239                          * and napi_complete() to close the race condition.
7240                          */
7241                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7242                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7243                                                   HOSTCC_MODE_ENABLE |
7244                                                   tnapi->coal_now);
7245                         }
7246                         mmiowb();
7247                         break;
7248                 }
7249         }
7250
7251         return work_done;
7252
7253 tx_recovery:
7254         /* work_done is guaranteed to be less than budget. */
7255         napi_complete(napi);
7256         tg3_reset_task_schedule(tp);
7257         return work_done;
7258 }
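
/* The two rx_refill tests above form a check/re-check pattern: the
 * first, taken before napi_complete_done(), merely loops again to
 * save an interrupt and is explicitly not race free; the second,
 * taken after the mailbox write has re-enabled interrupts, closes the
 * race with the RX path that sets tp->rx_refill by forcing an
 * immediate interrupt (HOSTCC_MODE_ENABLE | tnapi->coal_now), so a
 * refill request can never be stranded with interrupts re-enabled
 * but no poll scheduled.
 */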
7259
7260 static void tg3_process_error(struct tg3 *tp)
7261 {
7262         u32 val;
7263         bool real_error = false;
7264
7265         if (tg3_flag(tp, ERROR_PROCESSED))
7266                 return;
7267
7268         /* Check Flow Attention register */
7269         val = tr32(HOSTCC_FLOW_ATTN);
7270         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7271                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7272                 real_error = true;
7273         }
7274
7275         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7276                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7277                 real_error = true;
7278         }
7279
7280         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7281                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7282                 real_error = true;
7283         }
7284
7285         if (!real_error)
7286                 return;
7287
7288         tg3_dump_state(tp);
7289
7290         tg3_flag_set(tp, ERROR_PROCESSED);
7291         tg3_reset_task_schedule(tp);
7292 }
7293
7294 static int tg3_poll(struct napi_struct *napi, int budget)
7295 {
7296         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7297         struct tg3 *tp = tnapi->tp;
7298         int work_done = 0;
7299         struct tg3_hw_status *sblk = tnapi->hw_status;
7300
7301         while (1) {
7302                 if (sblk->status & SD_STATUS_ERROR)
7303                         tg3_process_error(tp);
7304
7305                 tg3_poll_link(tp);
7306
7307                 work_done = tg3_poll_work(tnapi, work_done, budget);
7308
7309                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7310                         goto tx_recovery;
7311
7312                 if (unlikely(work_done >= budget))
7313                         break;
7314
7315                 if (tg3_flag(tp, TAGGED_STATUS)) {
7316                         /* tp->last_tag is used in tg3_int_reenable() below
7317                          * to tell the hw how much work has been processed,
7318                          * so we must read it before checking for more work.
7319                          */
7320                         tnapi->last_tag = sblk->status_tag;
7321                         tnapi->last_irq_tag = tnapi->last_tag;
7322                         rmb();
7323                 } else
7324                         sblk->status &= ~SD_STATUS_UPDATED;
7325
7326                 if (likely(!tg3_has_work(tnapi))) {
7327                         napi_complete_done(napi, work_done);
7328                         tg3_int_reenable(tnapi);
7329                         break;
7330                 }
7331         }
7332
7333         return work_done;
7334
7335 tx_recovery:
7336         /* work_done is guaranteed to be less than budget. */
7337         napi_complete(napi);
7338         tg3_reset_task_schedule(tp);
7339         return work_done;
7340 }
7341
7342 static void tg3_napi_disable(struct tg3 *tp)
7343 {
7344         int i;
7345
7346         for (i = tp->irq_cnt - 1; i >= 0; i--)
7347                 napi_disable(&tp->napi[i].napi);
7348 }
7349
7350 static void tg3_napi_enable(struct tg3 *tp)
7351 {
7352         int i;
7353
7354         for (i = 0; i < tp->irq_cnt; i++)
7355                 napi_enable(&tp->napi[i].napi);
7356 }
7357
7358 static void tg3_napi_init(struct tg3 *tp)
7359 {
7360         int i;
7361
7362         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7363         for (i = 1; i < tp->irq_cnt; i++)
7364                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7365 }
7366
7367 static void tg3_napi_fini(struct tg3 *tp)
7368 {
7369         int i;
7370
7371         for (i = 0; i < tp->irq_cnt; i++)
7372                 netif_napi_del(&tp->napi[i].napi);
7373 }
7374
7375 static inline void tg3_netif_stop(struct tg3 *tp)
7376 {
7377         netif_trans_update(tp->dev);    /* prevent tx timeout */
7378         tg3_napi_disable(tp);
7379         netif_carrier_off(tp->dev);
7380         netif_tx_disable(tp->dev);
7381 }
7382
7383 /* tp->lock must be held */
7384 static inline void tg3_netif_start(struct tg3 *tp)
7385 {
7386         tg3_ptp_resume(tp);
7387
7388         /* NOTE: unconditional netif_tx_wake_all_queues is only
7389          * appropriate so long as all callers are assured to
7390          * have free tx slots (such as after tg3_init_hw)
7391          */
7392         netif_tx_wake_all_queues(tp->dev);
7393
7394         if (tp->link_up)
7395                 netif_carrier_on(tp->dev);
7396
7397         tg3_napi_enable(tp);
7398         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7399         tg3_enable_ints(tp);
7400 }
7401
7402 static void tg3_irq_quiesce(struct tg3 *tp)
7403         __releases(tp->lock)
7404         __acquires(tp->lock)
7405 {
7406         int i;
7407
7408         BUG_ON(tp->irq_sync);
7409
7410         tp->irq_sync = 1;
7411         smp_mb();
7412
7413         spin_unlock_bh(&tp->lock);
7414
7415         for (i = 0; i < tp->irq_cnt; i++)
7416                 synchronize_irq(tp->napi[i].irq_vec);
7417
7418         spin_lock_bh(&tp->lock);
7419 }
7420
7421 /* Fully shut down all tg3 driver activity elsewhere in the system.
7422  * If irq_sync is non-zero, the IRQ handler must be synchronized with
7423  * as well.  This is usually only necessary when shutting down the
7424  * device.
7425  */
7426 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7427 {
7428         spin_lock_bh(&tp->lock);
7429         if (irq_sync)
7430                 tg3_irq_quiesce(tp);
7431 }
7432
7433 static inline void tg3_full_unlock(struct tg3 *tp)
7434 {
7435         spin_unlock_bh(&tp->lock);
7436 }
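
/* Usage sketch for the locking helpers above (illustrative only):
 *
 *      tg3_full_lock(tp, 1);      // also quiesces IRQ handlers
 *      ...stop or reprogram the hardware...
 *      tg3_full_unlock(tp);
 *
 * irq_sync == 0 just takes tp->lock; a non-zero value additionally
 * waits in tg3_irq_quiesce() for in-flight interrupt handlers to
 * finish, which is typically only needed on shutdown paths.
 */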
7437
7438 /* One-shot MSI handler - Chip automatically disables interrupt
7439  * after sending MSI so driver doesn't have to do it.
7440  */
7441 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7442 {
7443         struct tg3_napi *tnapi = dev_id;
7444         struct tg3 *tp = tnapi->tp;
7445
7446         prefetch(tnapi->hw_status);
7447         if (tnapi->rx_rcb)
7448                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7449
7450         if (likely(!tg3_irq_sync(tp)))
7451                 napi_schedule(&tnapi->napi);
7452
7453         return IRQ_HANDLED;
7454 }
7455
7456 /* MSI ISR - No need to check for interrupt sharing and no need to
7457  * flush status block and interrupt mailbox. PCI ordering rules
7458  * guarantee that MSI will arrive after the status block.
7459  */
7460 static irqreturn_t tg3_msi(int irq, void *dev_id)
7461 {
7462         struct tg3_napi *tnapi = dev_id;
7463         struct tg3 *tp = tnapi->tp;
7464
7465         prefetch(tnapi->hw_status);
7466         if (tnapi->rx_rcb)
7467                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7468         /*
7469          * Writing any value to intr-mbox-0 clears PCI INTA# and
7470          * chip-internal interrupt pending events.
7471          * Writing non-zero to intr-mbox-0 additionally tells the
7472          * NIC to stop sending us irqs, engaging "in-intr-handler"
7473          * event coalescing.
7474          */
7475         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7476         if (likely(!tg3_irq_sync(tp)))
7477                 napi_schedule(&tnapi->napi);
7478
7479         return IRQ_RETVAL(1);
7480 }
7481
7482 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7483 {
7484         struct tg3_napi *tnapi = dev_id;
7485         struct tg3 *tp = tnapi->tp;
7486         struct tg3_hw_status *sblk = tnapi->hw_status;
7487         unsigned int handled = 1;
7488
7489         /* In INTx mode, it is possible for the interrupt to arrive at
7490          * the CPU before the status block that was posted just prior to it.
7491          * Reading the PCI State register will confirm whether the
7492          * interrupt is ours and will flush the status block.
7493          */
7494         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7495                 if (tg3_flag(tp, CHIP_RESETTING) ||
7496                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7497                         handled = 0;
7498                         goto out;
7499                 }
7500         }
7501
7502         /*
7503          * Writing any value to intr-mbox-0 clears PCI INTA# and
7504          * chip-internal interrupt pending events.
7505          * Writing non-zero to intr-mbox-0 additionally tells the
7506          * NIC to stop sending us irqs, engaging "in-intr-handler"
7507          * event coalescing.
7508          *
7509          * Flush the mailbox to de-assert the IRQ immediately to prevent
7510          * spurious interrupts.  The flush impacts performance but
7511          * excessive spurious interrupts can be worse in some cases.
7512          */
7513         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7514         if (tg3_irq_sync(tp))
7515                 goto out;
7516         sblk->status &= ~SD_STATUS_UPDATED;
7517         if (likely(tg3_has_work(tnapi))) {
7518                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7519                 napi_schedule(&tnapi->napi);
7520         } else {
7521                 /* No work, shared interrupt perhaps?  re-enable
7522                  * interrupts, and flush that PCI write
7523                  */
7524                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7525                                0x00000000);
7526         }
7527 out:
7528         return IRQ_RETVAL(handled);
7529 }
7530
7531 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7532 {
7533         struct tg3_napi *tnapi = dev_id;
7534         struct tg3 *tp = tnapi->tp;
7535         struct tg3_hw_status *sblk = tnapi->hw_status;
7536         unsigned int handled = 1;
7537
7538         /* In INTx mode, it is possible for the interrupt to arrive at
7539          * the CPU before the status block that was posted just prior to it.
7540          * Reading the PCI State register will confirm whether the
7541          * interrupt is ours and will flush the status block.
7542          */
7543         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7544                 if (tg3_flag(tp, CHIP_RESETTING) ||
7545                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7546                         handled = 0;
7547                         goto out;
7548                 }
7549         }
7550
7551         /*
7552          * Writing any value to intr-mbox-0 clears PCI INTA# and
7553          * chip-internal interrupt pending events.
7554          * Writing non-zero to intr-mbox-0 additionally tells the
7555          * NIC to stop sending us irqs, engaging "in-intr-handler"
7556          * event coalescing.
7557          *
7558          * Flush the mailbox to de-assert the IRQ immediately to prevent
7559          * spurious interrupts.  The flush impacts performance but
7560          * excessive spurious interrupts can be worse in some cases.
7561          */
7562         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7563
7564         /*
7565          * In a shared interrupt configuration, sometimes other devices'
7566          * interrupts will scream.  We record the current status tag here
7567          * so that the above check can report that the screaming interrupts
7568          * are unhandled.  Eventually they will be silenced.
7569          */
7570         tnapi->last_irq_tag = sblk->status_tag;
7571
7572         if (tg3_irq_sync(tp))
7573                 goto out;
7574
7575         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7576
7577         napi_schedule(&tnapi->napi);
7578
7579 out:
7580         return IRQ_RETVAL(handled);
7581 }
7582
7583 /* ISR for interrupt test */
7584 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7585 {
7586         struct tg3_napi *tnapi = dev_id;
7587         struct tg3 *tp = tnapi->tp;
7588         struct tg3_hw_status *sblk = tnapi->hw_status;
7589
7590         if ((sblk->status & SD_STATUS_UPDATED) ||
7591             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7592                 tg3_disable_ints(tp);
7593                 return IRQ_RETVAL(1);
7594         }
7595         return IRQ_RETVAL(0);
7596 }
7597
7598 #ifdef CONFIG_NET_POLL_CONTROLLER
7599 static void tg3_poll_controller(struct net_device *dev)
7600 {
7601         int i;
7602         struct tg3 *tp = netdev_priv(dev);
7603
7604         if (tg3_irq_sync(tp))
7605                 return;
7606
7607         for (i = 0; i < tp->irq_cnt; i++)
7608                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7609 }
7610 #endif
7611
7612 static void tg3_tx_timeout(struct net_device *dev)
7613 {
7614         struct tg3 *tp = netdev_priv(dev);
7615
7616         if (netif_msg_tx_err(tp)) {
7617                 netdev_err(dev, "transmit timed out, resetting\n");
7618                 tg3_dump_state(tp);
7619         }
7620
7621         tg3_reset_task_schedule(tp);
7622 }
7623
7624 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7625 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7626 {
7627         u32 base = (u32) mapping & 0xffffffff;
7628
7629         return base + len + 8 < base;
7630 }
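
/* Worked example of the wraparound test above: the arithmetic is done
 * in 32 bits, so base + len + 8 < base holds exactly when the sum
 * overflows, i.e. when the buffer (plus an 8-byte guard) crosses a
 * 4GB boundary.  E.g. mapping == 0xfffffff0 and len == 16 give
 * 0xfffffff0 + 16 + 8 == 0x8 after truncation, which is < base, so
 * the test fires.
 */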
7631
7632 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7633  * of any 4GB boundaries: 4G, 8G, etc
7634  */
7635 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7636                                            u32 len, u32 mss)
7637 {
7638         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7639                 u32 base = (u32) mapping & 0xffffffff;
7640
7641                 return ((base + len + (mss & 0x3fff)) < base);
7642         }
7643         return 0;
7644 }
7645
7646 /* Test for DMA addresses > 40-bit */
7647 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7648                                           int len)
7649 {
7650 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7651         if (tg3_flag(tp, 40BIT_DMA_BUG))
7652                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7653         return 0;
7654 #else
7655         return 0;
7656 #endif
7657 }
7658
7659 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7660                                  dma_addr_t mapping, u32 len, u32 flags,
7661                                  u32 mss, u32 vlan)
7662 {
7663         txbd->addr_hi = ((u64) mapping >> 32);
7664         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7665         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7666         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7667 }
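
/* Layout note for tg3_tx_set_bd(): a TX buffer descriptor is four
 * 32-bit words.  For a hypothetical 1514-byte frame mapped at DMA
 * address 0x123456000, VLAN tag 5, no TSO (mss == 0), it would write:
 *
 *      addr_hi   = 0x00000001
 *      addr_lo   = 0x23456000
 *      len_flags = (1514 << TXD_LEN_SHIFT) | (flags & 0xffff)
 *      vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT)
 */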
7668
7669 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7670                             dma_addr_t map, u32 len, u32 flags,
7671                             u32 mss, u32 vlan)
7672 {
7673         struct tg3 *tp = tnapi->tp;
7674         bool hwbug = false;
7675
7676         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7677                 hwbug = true;
7678
7679         if (tg3_4g_overflow_test(map, len))
7680                 hwbug = true;
7681
7682         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7683                 hwbug = true;
7684
7685         if (tg3_40bit_overflow_test(tp, map, len))
7686                 hwbug = true;
7687
7688         if (tp->dma_limit) {
7689                 u32 prvidx = *entry;
7690                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7691                 while (len > tp->dma_limit && *budget) {
7692                         u32 frag_len = tp->dma_limit;
7693                         len -= tp->dma_limit;
7694
7695                          /* Avoid the 8-byte DMA problem */
7696                         if (len <= 8) {
7697                                 len += tp->dma_limit / 2;
7698                                 frag_len = tp->dma_limit / 2;
7699                         }
7700
7701                         tnapi->tx_buffers[*entry].fragmented = true;
7702
7703                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7704                                       frag_len, tmp_flag, mss, vlan);
7705                         *budget -= 1;
7706                         prvidx = *entry;
7707                         *entry = NEXT_TX(*entry);
7708
7709                         map += frag_len;
7710                 }
7711
7712                 if (len) {
7713                         if (*budget) {
7714                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7715                                               len, flags, mss, vlan);
7716                                 *budget -= 1;
7717                                 *entry = NEXT_TX(*entry);
7718                         } else {
7719                                 hwbug = true;
7720                                 tnapi->tx_buffers[prvidx].fragmented = false;
7721                         }
7722                 }
7723         } else {
7724                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7725                               len, flags, mss, vlan);
7726                 *entry = NEXT_TX(*entry);
7727         }
7728
7729         return hwbug;
7730 }
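
/* Worked example of the dma_limit split above (hypothetical values):
 * with tp->dma_limit == 4096 and len == 8200, the first pass emits a
 * 4096-byte BD (8200 - 4096 = 4104 bytes left); on the second pass
 * the remainder would be 4104 - 4096 = 8 bytes, which would trip the
 * short-DMA bug, so the chunk is halved to a 2048-byte BD and the
 * trailing if (len) emits the final 2056-byte BD carrying the
 * caller's flags (including TXD_FLAG_END).  Each split BD except the
 * last is marked ->fragmented so tg3_tx_skb_unmap() can walk past
 * the synthetic entries on unwind.
 */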
7731
7732 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7733 {
7734         int i;
7735         struct sk_buff *skb;
7736         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7737
7738         skb = txb->skb;
7739         txb->skb = NULL;
7740
7741         pci_unmap_single(tnapi->tp->pdev,
7742                          dma_unmap_addr(txb, mapping),
7743                          skb_headlen(skb),
7744                          PCI_DMA_TODEVICE);
7745
7746         while (txb->fragmented) {
7747                 txb->fragmented = false;
7748                 entry = NEXT_TX(entry);
7749                 txb = &tnapi->tx_buffers[entry];
7750         }
7751
7752         for (i = 0; i <= last; i++) {
7753                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7754
7755                 entry = NEXT_TX(entry);
7756                 txb = &tnapi->tx_buffers[entry];
7757
7758                 pci_unmap_page(tnapi->tp->pdev,
7759                                dma_unmap_addr(txb, mapping),
7760                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7761
7762                 while (txb->fragmented) {
7763                         txb->fragmented = false;
7764                         entry = NEXT_TX(entry);
7765                         txb = &tnapi->tx_buffers[entry];
7766                 }
7767         }
7768 }
7769
7770 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7771 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7772                                        struct sk_buff **pskb,
7773                                        u32 *entry, u32 *budget,
7774                                        u32 base_flags, u32 mss, u32 vlan)
7775 {
7776         struct tg3 *tp = tnapi->tp;
7777         struct sk_buff *new_skb, *skb = *pskb;
7778         dma_addr_t new_addr = 0;
7779         int ret = 0;
7780
7781         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7782                 new_skb = skb_copy(skb, GFP_ATOMIC);
7783         else {
7784                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7785
7786                 new_skb = skb_copy_expand(skb,
7787                                           skb_headroom(skb) + more_headroom,
7788                                           skb_tailroom(skb), GFP_ATOMIC);
7789         }
7790
7791         if (!new_skb) {
7792                 ret = -1;
7793         } else {
7794                 /* New SKB is guaranteed to be linear. */
7795                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7796                                           PCI_DMA_TODEVICE);
7797                 /* Make sure the mapping succeeded */
7798                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7799                         dev_kfree_skb_any(new_skb);
7800                         ret = -1;
7801                 } else {
7802                         u32 save_entry = *entry;
7803
7804                         base_flags |= TXD_FLAG_END;
7805
7806                         tnapi->tx_buffers[*entry].skb = new_skb;
7807                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7808                                            mapping, new_addr);
7809
7810                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7811                                             new_skb->len, base_flags,
7812                                             mss, vlan)) {
7813                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7814                                 dev_kfree_skb_any(new_skb);
7815                                 ret = -1;
7816                         }
7817                 }
7818         }
7819
7820         dev_kfree_skb_any(skb);
7821         *pskb = new_skb;
7822         return ret;
7823 }
7824
7825 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7826 {
7827         /* Check if we will never have enough descriptors, since
7828          * gso_segs can exceed the current ring size.
7829          */
7830         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7831 }
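
/* The divisor of 3 above mirrors the frag_cnt_est = gso_segs * 3
 * estimate in tg3_tso_bug() below: the GSO fallback is attempted only
 * when even a pessimistic three-descriptors-per-segment budget fits
 * within the configured TX ring.
 */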
7832
7833 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7834
7835 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7836  * indicated in tg3_tx_frag_set()
7837  */
7838 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7839                        struct netdev_queue *txq, struct sk_buff *skb)
7840 {
7841         struct sk_buff *segs, *nskb;
7842         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7843
7844         /* Estimate the number of fragments in the worst case */
7845         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7846                 netif_tx_stop_queue(txq);
7847
7848                  /* netif_tx_stop_queue() must be done before checking
7849                   * the tx index in tg3_tx_avail() below, because in
7850                  * tg3_tx(), we update tx index before checking for
7851                  * netif_tx_queue_stopped().
7852                  */
7853                 smp_mb();
7854                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7855                         return NETDEV_TX_BUSY;
7856
7857                 netif_tx_wake_queue(txq);
7858         }
7859
7860         segs = skb_gso_segment(skb, tp->dev->features &
7861                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7862         if (IS_ERR(segs) || !segs)
7863                 goto tg3_tso_bug_end;
7864
7865         do {
7866                 nskb = segs;
7867                 segs = segs->next;
7868                 nskb->next = NULL;
7869                 tg3_start_xmit(nskb, tp->dev);
7870         } while (segs);
7871
7872 tg3_tso_bug_end:
7873         dev_kfree_skb_any(skb);
7874
7875         return NETDEV_TX_OK;
7876 }
7877
7878 /* hard_start_xmit for all devices */
7879 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7880 {
7881         struct tg3 *tp = netdev_priv(dev);
7882         u32 len, entry, base_flags, mss, vlan = 0;
7883         u32 budget;
7884         int i = -1, would_hit_hwbug;
7885         dma_addr_t mapping;
7886         struct tg3_napi *tnapi;
7887         struct netdev_queue *txq;
7888         unsigned int last;
7889         struct iphdr *iph = NULL;
7890         struct tcphdr *tcph = NULL;
7891         __sum16 tcp_csum = 0, ip_csum = 0;
7892         __be16 ip_tot_len = 0;
7893
7894         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7895         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7896         if (tg3_flag(tp, ENABLE_TSS))
7897                 tnapi++;
7898
7899         budget = tg3_tx_avail(tnapi);
7900
7901         /* We are running in BH disabled context with netif_tx_lock
7902          * and TX reclaim runs via tp->napi.poll inside of a software
7903          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7904          * no IRQ context deadlocks to worry about either.  Rejoice!
7905          */
7906         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7907                 if (!netif_tx_queue_stopped(txq)) {
7908                         netif_tx_stop_queue(txq);
7909
7910                         /* This is a hard error, log it. */
7911                         netdev_err(dev,
7912                                    "BUG! Tx Ring full when queue awake!\n");
7913                 }
7914                 return NETDEV_TX_BUSY;
7915         }
7916
7917         entry = tnapi->tx_prod;
7918         base_flags = 0;
7919
7920         mss = skb_shinfo(skb)->gso_size;
7921         if (mss) {
7922                 u32 tcp_opt_len, hdr_len;
7923
7924                 if (skb_cow_head(skb, 0))
7925                         goto drop;
7926
7927                 iph = ip_hdr(skb);
7928                 tcp_opt_len = tcp_optlen(skb);
7929
7930                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7931
7932                 /* HW/FW can not correctly segment packets that have been
7933                  * vlan encapsulated.
7934                  */
7935                 if (skb->protocol == htons(ETH_P_8021Q) ||
7936                     skb->protocol == htons(ETH_P_8021AD)) {
7937                         if (tg3_tso_bug_gso_check(tnapi, skb))
7938                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7939                         goto drop;
7940                 }
7941
7942                 if (!skb_is_gso_v6(skb)) {
7943                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7944                             tg3_flag(tp, TSO_BUG)) {
7945                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7946                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7947                                 goto drop;
7948                         }
7949                         ip_csum = iph->check;
7950                         ip_tot_len = iph->tot_len;
7951                         iph->check = 0;
7952                         iph->tot_len = htons(mss + hdr_len);
7953                 }
7954
7955                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7956                                TXD_FLAG_CPU_POST_DMA);
7957
7958                 tcph = tcp_hdr(skb);
7959                 tcp_csum = tcph->check;
7960
7961                 if (tg3_flag(tp, HW_TSO_1) ||
7962                     tg3_flag(tp, HW_TSO_2) ||
7963                     tg3_flag(tp, HW_TSO_3)) {
7964                         tcph->check = 0;
7965                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7966                 } else {
7967                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7968                                                          0, IPPROTO_TCP, 0);
7969                 }
7970
7971                 if (tg3_flag(tp, HW_TSO_3)) {
7972                         mss |= (hdr_len & 0xc) << 12;
7973                         if (hdr_len & 0x10)
7974                                 base_flags |= 0x00000010;
7975                         base_flags |= (hdr_len & 0x3e0) << 5;
7976                 } else if (tg3_flag(tp, HW_TSO_2))
7977                         mss |= hdr_len << 9;
7978                 else if (tg3_flag(tp, HW_TSO_1) ||
7979                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7980                         if (tcp_opt_len || iph->ihl > 5) {
7981                                 int tsflags;
7982
7983                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7984                                 mss |= (tsflags << 11);
7985                         }
7986                 } else {
7987                         if (tcp_opt_len || iph->ihl > 5) {
7988                                 int tsflags;
7989
7990                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7991                                 base_flags |= tsflags << 12;
7992                         }
7993                 }
7994         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7995                 /* HW/FW can not correctly checksum packets that have been
7996                  * vlan encapsulated.
7997                  */
7998                 if (skb->protocol == htons(ETH_P_8021Q) ||
7999                     skb->protocol == htons(ETH_P_8021AD)) {
8000                         if (skb_checksum_help(skb))
8001                                 goto drop;
8002                 } else  {
8003                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8004                 }
8005         }
8006
8007         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8008             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8009                 base_flags |= TXD_FLAG_JMB_PKT;
8010
8011         if (skb_vlan_tag_present(skb)) {
8012                 base_flags |= TXD_FLAG_VLAN;
8013                 vlan = skb_vlan_tag_get(skb);
8014         }
8015
8016         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8017             tg3_flag(tp, TX_TSTAMP_EN)) {
8018                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8019                 base_flags |= TXD_FLAG_HWTSTAMP;
8020         }
8021
8022         len = skb_headlen(skb);
8023
8024         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8025         if (pci_dma_mapping_error(tp->pdev, mapping))
8026                 goto drop;
8027
8029         tnapi->tx_buffers[entry].skb = skb;
8030         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8031
8032         would_hit_hwbug = 0;
8033
8034         if (tg3_flag(tp, 5701_DMA_BUG))
8035                 would_hit_hwbug = 1;
8036
8037         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8038                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8039                             mss, vlan)) {
8040                 would_hit_hwbug = 1;
8041         } else if (skb_shinfo(skb)->nr_frags > 0) {
8042                 u32 tmp_mss = mss;
8043
8044                 if (!tg3_flag(tp, HW_TSO_1) &&
8045                     !tg3_flag(tp, HW_TSO_2) &&
8046                     !tg3_flag(tp, HW_TSO_3))
8047                         tmp_mss = 0;
8048
8049                 /* Now loop through additional data
8050                  * fragments, and queue them.
8051                  */
8052                 last = skb_shinfo(skb)->nr_frags - 1;
8053                 for (i = 0; i <= last; i++) {
8054                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8055
8056                         len = skb_frag_size(frag);
8057                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8058                                                    len, DMA_TO_DEVICE);
8059
8060                         tnapi->tx_buffers[entry].skb = NULL;
8061                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8062                                            mapping);
8063                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8064                                 goto dma_error;
8065
8066                         if (!budget ||
8067                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8068                                             len, base_flags |
8069                                             ((i == last) ? TXD_FLAG_END : 0),
8070                                             tmp_mss, vlan)) {
8071                                 would_hit_hwbug = 1;
8072                                 break;
8073                         }
8074                 }
8075         }
8076
8077         if (would_hit_hwbug) {
8078                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8079
8080                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8081                         /* If it's a TSO packet, do GSO instead of
8082                          * allocating and copying to a large linear SKB
8083                          */
8084                         if (ip_tot_len) {
8085                                 iph->check = ip_csum;
8086                                 iph->tot_len = ip_tot_len;
8087                         }
8088                         tcph->check = tcp_csum;
8089                         return tg3_tso_bug(tp, tnapi, txq, skb);
8090                 }
8091
8092                 /* If the workaround fails due to memory/mapping
8093                  * failure, silently drop this packet.
8094                  */
8095                 entry = tnapi->tx_prod;
8096                 budget = tg3_tx_avail(tnapi);
8097                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8098                                                 base_flags, mss, vlan))
8099                         goto drop_nofree;
8100         }
8101
8102         skb_tx_timestamp(skb);
8103         netdev_tx_sent_queue(txq, skb->len);
8104
8105         /* Sync BD data before updating mailbox */
8106         wmb();
8107
8108         tnapi->tx_prod = entry;
8109         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8110                 netif_tx_stop_queue(txq);
8111
8112                  /* netif_tx_stop_queue() must be done before checking
8113                   * the tx index in tg3_tx_avail() below, because in
8114                  * tg3_tx(), we update tx index before checking for
8115                  * netif_tx_queue_stopped().
8116                  */
8117                 smp_mb();
8118                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8119                         netif_tx_wake_queue(txq);
8120         }
8121
8122         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8123                 /* Packets are ready, update Tx producer idx on card. */
8124                 tw32_tx_mbox(tnapi->prodmbox, entry);
8125                 mmiowb();
8126         }
8127
8128         return NETDEV_TX_OK;
8129
8130 dma_error:
8131         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8132         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8133 drop:
8134         dev_kfree_skb_any(skb);
8135 drop_nofree:
8136         tp->tx_dropped++;
8137         return NETDEV_TX_OK;
8138 }
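
/* Recovery ordering in tg3_start_xmit() above, for reference: when a
 * BD would trip a hardware bug, the already-mapped fragments are
 * unwound via tg3_tx_skb_unmap() first; a TSO packet that passes
 * tg3_tso_bug_gso_check() is then resubmitted through the GSO
 * software path (after the IP/TCP checksum fields rewritten for
 * hardware TSO are restored), while everything else falls back to
 * tigon3_dma_hwbug_workaround(), which copies the skb into a fresh
 * linear buffer.
 */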
8139
8140 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8141 {
8142         if (enable) {
8143                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8144                                   MAC_MODE_PORT_MODE_MASK);
8145
8146                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8147
8148                 if (!tg3_flag(tp, 5705_PLUS))
8149                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8150
8151                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8152                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8153                 else
8154                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8155         } else {
8156                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8157
8158                 if (tg3_flag(tp, 5705_PLUS) ||
8159                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8160                     tg3_asic_rev(tp) == ASIC_REV_5700)
8161                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8162         }
8163
8164         tw32(MAC_MODE, tp->mac_mode);
8165         udelay(40);
8166 }
8167
8168 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8169 {
8170         u32 val, bmcr, mac_mode, ptest = 0;
8171
8172         tg3_phy_toggle_apd(tp, false);
8173         tg3_phy_toggle_automdix(tp, false);
8174
8175         if (extlpbk && tg3_phy_set_extloopbk(tp))
8176                 return -EIO;
8177
8178         bmcr = BMCR_FULLDPLX;
8179         switch (speed) {
8180         case SPEED_10:
8181                 break;
8182         case SPEED_100:
8183                 bmcr |= BMCR_SPEED100;
8184                 break;
8185         case SPEED_1000:
8186         default:
8187                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8188                         speed = SPEED_100;
8189                         bmcr |= BMCR_SPEED100;
8190                 } else {
8191                         speed = SPEED_1000;
8192                         bmcr |= BMCR_SPEED1000;
8193                 }
8194         }
8195
8196         if (extlpbk) {
8197                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8198                         tg3_readphy(tp, MII_CTRL1000, &val);
8199                         val |= CTL1000_AS_MASTER |
8200                                CTL1000_ENABLE_MASTER;
8201                         tg3_writephy(tp, MII_CTRL1000, val);
8202                 } else {
8203                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8204                                 MII_TG3_FET_PTEST_TRIM_2;
8205                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8206                 }
8207         } else
8208                 bmcr |= BMCR_LOOPBACK;
8209
8210         tg3_writephy(tp, MII_BMCR, bmcr);
8211
8212         /* The write needs to be flushed for the FETs */
8213         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8214                 tg3_readphy(tp, MII_BMCR, &bmcr);
8215
8216         udelay(40);
8217
8218         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8219             tg3_asic_rev(tp) == ASIC_REV_5785) {
8220                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8221                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8222                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8223
8224                 /* The write needs to be flushed for the AC131 */
8225                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8226         }
8227
8228         /* Reset to prevent losing 1st rx packet intermittently */
8229         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8230             tg3_flag(tp, 5780_CLASS)) {
8231                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8232                 udelay(10);
8233                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8234         }
8235
8236         mac_mode = tp->mac_mode &
8237                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8238         if (speed == SPEED_1000)
8239                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8240         else
8241                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8242
8243         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8244                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8245
8246                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8247                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8248                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8249                         mac_mode |= MAC_MODE_LINK_POLARITY;
8250
8251                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8252                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8253         }
8254
8255         tw32(MAC_MODE, mac_mode);
8256         udelay(40);
8257
8258         return 0;
8259 }
8260
8261 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8262 {
8263         struct tg3 *tp = netdev_priv(dev);
8264
8265         if (features & NETIF_F_LOOPBACK) {
8266                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8267                         return;
8268
8269                 spin_lock_bh(&tp->lock);
8270                 tg3_mac_loopback(tp, true);
8271                 netif_carrier_on(tp->dev);
8272                 spin_unlock_bh(&tp->lock);
8273                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8274         } else {
8275                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8276                         return;
8277
8278                 spin_lock_bh(&tp->lock);
8279                 tg3_mac_loopback(tp, false);
8280                 /* Force link status check */
8281                 tg3_setup_phy(tp, true);
8282                 spin_unlock_bh(&tp->lock);
8283                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8284         }
8285 }
8286
8287 static netdev_features_t tg3_fix_features(struct net_device *dev,
8288         netdev_features_t features)
8289 {
8290         struct tg3 *tp = netdev_priv(dev);
8291
8292         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8293                 features &= ~NETIF_F_ALL_TSO;
8294
8295         return features;
8296 }
8297
8298 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8299 {
8300         netdev_features_t changed = dev->features ^ features;
8301
8302         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8303                 tg3_set_loopback(dev, features);
8304
8305         return 0;
8306 }
8307
8308 static void tg3_rx_prodring_free(struct tg3 *tp,
8309                                  struct tg3_rx_prodring_set *tpr)
8310 {
8311         int i;
8312
8313         if (tpr != &tp->napi[0].prodring) {
8314                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8315                      i = (i + 1) & tp->rx_std_ring_mask)
8316                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8317                                         tp->rx_pkt_map_sz);
8318
8319                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8320                         for (i = tpr->rx_jmb_cons_idx;
8321                              i != tpr->rx_jmb_prod_idx;
8322                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8323                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8324                                                 TG3_RX_JMB_MAP_SZ);
8325                         }
8326                 }
8327
8328                 return;
8329         }
8330
8331         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8332                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8333                                 tp->rx_pkt_map_sz);
8334
8335         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8336                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8337                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8338                                         TG3_RX_JMB_MAP_SZ);
8339         }
8340 }
8341
8342 /* Initialize rx rings for packet processing.
8343  *
8344  * The chip has been shut down and the driver detached from
8345  * the networking stack, so no interrupts or new tx packets will
8346  * end up in the driver.  tp->{tx,}lock are held and thus
8347  * we may not sleep.
8348  */
8349 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8350                                  struct tg3_rx_prodring_set *tpr)
8351 {
8352         u32 i, rx_pkt_dma_sz;
8353
8354         tpr->rx_std_cons_idx = 0;
8355         tpr->rx_std_prod_idx = 0;
8356         tpr->rx_jmb_cons_idx = 0;
8357         tpr->rx_jmb_prod_idx = 0;
8358
8359         if (tpr != &tp->napi[0].prodring) {
8360                 memset(&tpr->rx_std_buffers[0], 0,
8361                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8362                 if (tpr->rx_jmb_buffers)
8363                         memset(&tpr->rx_jmb_buffers[0], 0,
8364                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8365                 goto done;
8366         }
8367
8368         /* Zero out all descriptors. */
8369         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8370
8371         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8372         if (tg3_flag(tp, 5780_CLASS) &&
8373             tp->dev->mtu > ETH_DATA_LEN)
8374                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8375         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8376
8377         /* Initialize invariants of the rings; we only set this
8378          * stuff once.  This works because the card does not
8379          * write into the rx buffer posting rings.
8380          */
8381         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8382                 struct tg3_rx_buffer_desc *rxd;
8383
8384                 rxd = &tpr->rx_std[i];
8385                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8386                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8387                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8388                                (i << RXD_OPAQUE_INDEX_SHIFT));
8389         }
8390
8391         /* Now allocate fresh SKBs for each rx ring. */
8392         for (i = 0; i < tp->rx_pending; i++) {
8393                 unsigned int frag_size;
8394
8395                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8396                                       &frag_size) < 0) {
8397                         netdev_warn(tp->dev,
8398                                     "Using a smaller RX standard ring. Only "
8399                                     "%d out of %d buffers were allocated "
8400                                     "successfully\n", i, tp->rx_pending);
8401                         if (i == 0)
8402                                 goto initfail;
8403                         tp->rx_pending = i;
8404                         break;
8405                 }
8406         }
8407
8408         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8409                 goto done;
8410
8411         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8412
8413         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8414                 goto done;
8415
8416         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8417                 struct tg3_rx_buffer_desc *rxd;
8418
8419                 rxd = &tpr->rx_jmb[i].std;
8420                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8421                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8422                                   RXD_FLAG_JUMBO;
8423                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8424                        (i << RXD_OPAQUE_INDEX_SHIFT));
8425         }
8426
8427         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8428                 unsigned int frag_size;
8429
8430                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8431                                       &frag_size) < 0) {
8432                         netdev_warn(tp->dev,
8433                                     "Using a smaller RX jumbo ring. Only %d "
8434                                     "out of %d buffers were allocated "
8435                                     "successfully\n", i, tp->rx_jumbo_pending);
8436                         if (i == 0)
8437                                 goto initfail;
8438                         tp->rx_jumbo_pending = i;
8439                         break;
8440                 }
8441         }
8442
8443 done:
8444         return 0;
8445
8446 initfail:
8447         tg3_rx_prodring_free(tp, tpr);
8448         return -ENOMEM;
8449 }
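
/* Note the graceful degradation above: if buffer allocation fails
 * partway through, the ring is shrunk (tp->rx_pending or
 * tp->rx_jumbo_pending is lowered to the count actually allocated)
 * instead of failing the whole bring-up; only a failure on the very
 * first buffer (i == 0) unwinds through initfail and returns -ENOMEM.
 */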
8450
8451 static void tg3_rx_prodring_fini(struct tg3 *tp,
8452                                  struct tg3_rx_prodring_set *tpr)
8453 {
8454         kfree(tpr->rx_std_buffers);
8455         tpr->rx_std_buffers = NULL;
8456         kfree(tpr->rx_jmb_buffers);
8457         tpr->rx_jmb_buffers = NULL;
8458         if (tpr->rx_std) {
8459                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8460                                   tpr->rx_std, tpr->rx_std_mapping);
8461                 tpr->rx_std = NULL;
8462         }
8463         if (tpr->rx_jmb) {
8464                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8465                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8466                 tpr->rx_jmb = NULL;
8467         }
8468 }
8469
8470 static int tg3_rx_prodring_init(struct tg3 *tp,
8471                                 struct tg3_rx_prodring_set *tpr)
8472 {
8473         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8474                                       GFP_KERNEL);
8475         if (!tpr->rx_std_buffers)
8476                 return -ENOMEM;
8477
8478         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8479                                          TG3_RX_STD_RING_BYTES(tp),
8480                                          &tpr->rx_std_mapping,
8481                                          GFP_KERNEL);
8482         if (!tpr->rx_std)
8483                 goto err_out;
8484
8485         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8486                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8487                                               GFP_KERNEL);
8488                 if (!tpr->rx_jmb_buffers)
8489                         goto err_out;
8490
8491                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8492                                                  TG3_RX_JMB_RING_BYTES(tp),
8493                                                  &tpr->rx_jmb_mapping,
8494                                                  GFP_KERNEL);
8495                 if (!tpr->rx_jmb)
8496                         goto err_out;
8497         }
8498
8499         return 0;
8500
8501 err_out:
8502         tg3_rx_prodring_fini(tp, tpr);
8503         return -ENOMEM;
8504 }
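
/* tg3_rx_prodring_init() and tg3_rx_prodring_fini() are strict
 * allocate/free pairs: init's error path funnels through fini, and
 * fini NULLs every pointer after freeing it, so calling it again on
 * a partially initialized (or already freed) set is harmless.
 */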
8505
8506 /* Free up pending packets in all rx/tx rings.
8507  *
8508  * The chip has been shut down and the driver detached from
8509  * the networking stack, so no interrupts or new tx packets will
8510  * end up in the driver.  tp->{tx,}lock is not held and we are not
8511  * in an interrupt context and thus may sleep.
8512  */
8513 static void tg3_free_rings(struct tg3 *tp)
8514 {
8515         int i, j;
8516
8517         for (j = 0; j < tp->irq_cnt; j++) {
8518                 struct tg3_napi *tnapi = &tp->napi[j];
8519
8520                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8521
8522                 if (!tnapi->tx_buffers)
8523                         continue;
8524
8525                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8526                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8527
8528                         if (!skb)
8529                                 continue;
8530
8531                         tg3_tx_skb_unmap(tnapi, i,
8532                                          skb_shinfo(skb)->nr_frags - 1);
8533
8534                         dev_kfree_skb_any(skb);
8535                 }
8536                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8537         }
8538 }
8539
8540 /* Initialize tx/rx rings for packet processing.
8541  *
8542  * The chip has been shut down and the driver detached from
8543  * the networking stack, so no interrupts or new tx packets will
8544  * end up in the driver.  tp->{tx,}lock are held and thus
8545  * we may not sleep.
8546  */
8547 static int tg3_init_rings(struct tg3 *tp)
8548 {
8549         int i;
8550
8551         /* Free up all the SKBs. */
8552         tg3_free_rings(tp);
8553
8554         for (i = 0; i < tp->irq_cnt; i++) {
8555                 struct tg3_napi *tnapi = &tp->napi[i];
8556
8557                 tnapi->last_tag = 0;
8558                 tnapi->last_irq_tag = 0;
8559                 tnapi->hw_status->status = 0;
8560                 tnapi->hw_status->status_tag = 0;
8561                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8562
8563                 tnapi->tx_prod = 0;
8564                 tnapi->tx_cons = 0;
8565                 if (tnapi->tx_ring)
8566                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8567
8568                 tnapi->rx_rcb_ptr = 0;
8569                 if (tnapi->rx_rcb)
8570                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8571
8572                 if (tnapi->prodring.rx_std &&
8573                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8574                         tg3_free_rings(tp);
8575                         return -ENOMEM;
8576                 }
8577         }
8578
8579         return 0;
8580 }
8581
8582 static void tg3_mem_tx_release(struct tg3 *tp)
8583 {
8584         int i;
8585
8586         for (i = 0; i < tp->irq_max; i++) {
8587                 struct tg3_napi *tnapi = &tp->napi[i];
8588
8589                 if (tnapi->tx_ring) {
8590                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8591                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8592                         tnapi->tx_ring = NULL;
8593                 }
8594
8595                 kfree(tnapi->tx_buffers);
8596                 tnapi->tx_buffers = NULL;
8597         }
8598 }
8599
8600 static int tg3_mem_tx_acquire(struct tg3 *tp)
8601 {
8602         int i;
8603         struct tg3_napi *tnapi = &tp->napi[0];
8604
8605         /* If multivector TSS is enabled, vector 0 does not handle
8606          * tx interrupts.  Don't allocate any resources for it.
8607          */
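        /* Sketch of the resulting layout (illustrative, assuming
         * txq_cnt == 4): with TSS the tx rings occupy vectors 1..4
         * and tp->napi[0] keeps its tx_ring/tx_buffers NULL; without
         * TSS the same rings start at vector 0.
         */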
8608         if (tg3_flag(tp, ENABLE_TSS))
8609                 tnapi++;
8610
8611         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8612                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8613                                             sizeof(struct tg3_tx_ring_info), GFP_KERNEL);
8614                 if (!tnapi->tx_buffers)
8615                         goto err_out;
8616
8617                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8618                                                     TG3_TX_RING_BYTES,
8619                                                     &tnapi->tx_desc_mapping,
8620                                                     GFP_KERNEL);
8621                 if (!tnapi->tx_ring)
8622                         goto err_out;
8623         }
8624
8625         return 0;
8626
8627 err_out:
8628         tg3_mem_tx_release(tp);
8629         return -ENOMEM;
8630 }
8631
8632 static void tg3_mem_rx_release(struct tg3 *tp)
8633 {
8634         int i;
8635
8636         for (i = 0; i < tp->irq_max; i++) {
8637                 struct tg3_napi *tnapi = &tp->napi[i];
8638
8639                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8640
8641                 if (!tnapi->rx_rcb)
8642                         continue;
8643
8644                 dma_free_coherent(&tp->pdev->dev,
8645                                   TG3_RX_RCB_RING_BYTES(tp),
8646                                   tnapi->rx_rcb,
8647                                   tnapi->rx_rcb_mapping);
8648                 tnapi->rx_rcb = NULL;
8649         }
8650 }
8651
8652 static int tg3_mem_rx_acquire(struct tg3 *tp)
8653 {
8654         unsigned int i, limit;
8655
8656         limit = tp->rxq_cnt;
8657
8658         /* If RSS is enabled, we need a (dummy) producer ring
8659          * set on vector zero.  This is the true hw prodring.
8660          */
8661         if (tg3_flag(tp, ENABLE_RSS))
8662                 limit++;
8663
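        /* Illustrative example (assuming rxq_cnt == 4 with RSS on):
         * prodrings are initialized on vectors 0..4 below, but only
         * vectors 1..4 get an rx return ring; vector 0's prodring is
         * the one the hardware actually replenishes from.
         */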
8664         for (i = 0; i < limit; i++) {
8665                 struct tg3_napi *tnapi = &tp->napi[i];
8666
8667                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8668                         goto err_out;
8669
8670                 /* If multivector RSS is enabled, vector 0
8671                  * does not handle rx or tx interrupts.
8672                  * Don't allocate any resources for it.
8673                  */
8674                 if (!i && tg3_flag(tp, ENABLE_RSS))
8675                         continue;
8676
8677                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8678                                                     TG3_RX_RCB_RING_BYTES(tp),
8679                                                     &tnapi->rx_rcb_mapping,
8680                                                     GFP_KERNEL);
8681                 if (!tnapi->rx_rcb)
8682                         goto err_out;
8683         }
8684
8685         return 0;
8686
8687 err_out:
8688         tg3_mem_rx_release(tp);
8689         return -ENOMEM;
8690 }
8691
8692 /*
8693  * Must not be invoked with interrupt sources disabled and
8694  * the hardware shut down.
8695  */
8696 static void tg3_free_consistent(struct tg3 *tp)
8697 {
8698         int i;
8699
8700         for (i = 0; i < tp->irq_cnt; i++) {
8701                 struct tg3_napi *tnapi = &tp->napi[i];
8702
8703                 if (tnapi->hw_status) {
8704                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8705                                           tnapi->hw_status,
8706                                           tnapi->status_mapping);
8707                         tnapi->hw_status = NULL;
8708                 }
8709         }
8710
8711         tg3_mem_rx_release(tp);
8712         tg3_mem_tx_release(tp);
8713
8714         /* tp->hw_stats can be referenced safely:
8715          *     1. under rtnl_lock
8716          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8717          */
8718         if (tp->hw_stats) {
8719                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8720                                   tp->hw_stats, tp->stats_mapping);
8721                 tp->hw_stats = NULL;
8722         }
8723 }
8724
8725 /*
8726  * Must not be invoked with interrupt sources disabled and
8727  * the hardware shut down.  Can sleep.
8728  */
8729 static int tg3_alloc_consistent(struct tg3 *tp)
8730 {
8731         int i;
8732
8733         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8734                                            sizeof(struct tg3_hw_stats),
8735                                            &tp->stats_mapping, GFP_KERNEL);
8736         if (!tp->hw_stats)
8737                 goto err_out;
8738
8739         for (i = 0; i < tp->irq_cnt; i++) {
8740                 struct tg3_napi *tnapi = &tp->napi[i];
8741                 struct tg3_hw_status *sblk;
8742
8743                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8744                                                        TG3_HW_STATUS_SIZE,
8745                                                        &tnapi->status_mapping,
8746                                                        GFP_KERNEL);
8747                 if (!tnapi->hw_status)
8748                         goto err_out;
8749
8750                 sblk = tnapi->hw_status;
8751
8752                 if (tg3_flag(tp, ENABLE_RSS)) {
8753                         u16 *prodptr = NULL;
8754
8755                         /*
8756                          * When RSS is enabled, the status block format changes
8757                          * slightly.  The "rx_jumbo_consumer", "reserved",
8758                          * and "rx_mini_consumer" members get mapped to the
8759                          * other three rx return ring producer indexes.
8760                          */
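                        /* Resulting map (vector -> status block
                         * field), per the switch below:
                         *   1 -> idx[0].rx_producer
                         *   2 -> rx_jumbo_consumer
                         *   3 -> reserved
                         *   4 -> rx_mini_consumer
                         */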
8761                         switch (i) {
8762                         case 1:
8763                                 prodptr = &sblk->idx[0].rx_producer;
8764                                 break;
8765                         case 2:
8766                                 prodptr = &sblk->rx_jumbo_consumer;
8767                                 break;
8768                         case 3:
8769                                 prodptr = &sblk->reserved;
8770                                 break;
8771                         case 4:
8772                                 prodptr = &sblk->rx_mini_consumer;
8773                                 break;
8774                         }
8775                         tnapi->rx_rcb_prod_idx = prodptr;
8776                 } else {
8777                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8778                 }
8779         }
8780
8781         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8782                 goto err_out;
8783
8784         return 0;
8785
8786 err_out:
8787         tg3_free_consistent(tp);
8788         return -ENOMEM;
8789 }
8790
8791 #define MAX_WAIT_CNT 1000
8792
8793 /* To stop a block, clear the enable bit and poll till it
8794  * clears.  tp->lock is held.
8795  */
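/* With MAX_WAIT_CNT == 1000 and udelay(100) per iteration, the poll
 * loop below waits at most roughly 100 ms for the enable bit to clear
 * before reporting failure.
 */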
8796 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8797 {
8798         unsigned int i;
8799         u32 val;
8800
8801         if (tg3_flag(tp, 5705_PLUS)) {
8802                 switch (ofs) {
8803                 case RCVLSC_MODE:
8804                 case DMAC_MODE:
8805                 case MBFREE_MODE:
8806                 case BUFMGR_MODE:
8807                 case MEMARB_MODE:
8808                         /* We can't enable/disable these bits of the
8809                          * 5705/5750, just say success.
8810                          */
8811                         return 0;
8812
8813                 default:
8814                         break;
8815                 }
8816         }
8817
8818         val = tr32(ofs);
8819         val &= ~enable_bit;
8820         tw32_f(ofs, val);
8821
8822         for (i = 0; i < MAX_WAIT_CNT; i++) {
8823                 if (pci_channel_offline(tp->pdev)) {
8824                         dev_err(&tp->pdev->dev,
8825                                 "tg3_stop_block device offline, "
8826                                 "ofs=%lx enable_bit=%x\n",
8827                                 ofs, enable_bit);
8828                         return -ENODEV;
8829                 }
8830
8831                 udelay(100);
8832                 val = tr32(ofs);
8833                 if ((val & enable_bit) == 0)
8834                         break;
8835         }
8836
8837         if (i == MAX_WAIT_CNT && !silent) {
8838                 dev_err(&tp->pdev->dev,
8839                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8840                         ofs, enable_bit);
8841                 return -ENODEV;
8842         }
8843
8844         return 0;
8845 }
8846
8847 /* tp->lock is held. */
8848 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8849 {
8850         int i, err;
8851
8852         tg3_disable_ints(tp);
8853
8854         if (pci_channel_offline(tp->pdev)) {
8855                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8856                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8857                 err = -ENODEV;
8858                 goto err_no_dev;
8859         }
8860
8861         tp->rx_mode &= ~RX_MODE_ENABLE;
8862         tw32_f(MAC_RX_MODE, tp->rx_mode);
8863         udelay(10);
8864
8865         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8866         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8867         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8868         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8869         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8870         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8871
8872         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8873         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8874         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8875         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8876         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8877         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8879
8880         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8881         tw32_f(MAC_MODE, tp->mac_mode);
8882         udelay(40);
8883
8884         tp->tx_mode &= ~TX_MODE_ENABLE;
8885         tw32_f(MAC_TX_MODE, tp->tx_mode);
8886
8887         for (i = 0; i < MAX_WAIT_CNT; i++) {
8888                 udelay(100);
8889                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8890                         break;
8891         }
8892         if (i >= MAX_WAIT_CNT) {
8893                 dev_err(&tp->pdev->dev,
8894                         "%s timed out, TX_MODE_ENABLE will not clear "
8895                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8896                 err |= -ENODEV;
8897         }
8898
8899         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8900         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8901         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8902
8903         tw32(FTQ_RESET, 0xffffffff);
8904         tw32(FTQ_RESET, 0x00000000);
8905
8906         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8907         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8908
8909 err_no_dev:
8910         for (i = 0; i < tp->irq_cnt; i++) {
8911                 struct tg3_napi *tnapi = &tp->napi[i];
8912                 if (tnapi->hw_status)
8913                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8914         }
8915
8916         return err;
8917 }
8918
8919 /* Save PCI command register before chip reset */
8920 static void tg3_save_pci_state(struct tg3 *tp)
8921 {
8922         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8923 }
8924
8925 /* Restore PCI state after chip reset */
8926 static void tg3_restore_pci_state(struct tg3 *tp)
8927 {
8928         u32 val;
8929
8930         /* Re-enable indirect register accesses. */
8931         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8932                                tp->misc_host_ctrl);
8933
8934         /* Set MAX PCI retry to zero. */
8935         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8936         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8937             tg3_flag(tp, PCIX_MODE))
8938                 val |= PCISTATE_RETRY_SAME_DMA;
8939         /* Allow reads and writes to the APE register and memory space. */
8940         if (tg3_flag(tp, ENABLE_APE))
8941                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8942                        PCISTATE_ALLOW_APE_SHMEM_WR |
8943                        PCISTATE_ALLOW_APE_PSPACE_WR;
8944         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8945
8946         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8947
8948         if (!tg3_flag(tp, PCI_EXPRESS)) {
8949                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8950                                       tp->pci_cacheline_sz);
8951                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8952                                       tp->pci_lat_timer);
8953         }
8954
8955         /* Make sure PCI-X relaxed ordering bit is clear. */
8956         if (tg3_flag(tp, PCIX_MODE)) {
8957                 u16 pcix_cmd;
8958
8959                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8960                                      &pcix_cmd);
8961                 pcix_cmd &= ~PCI_X_CMD_ERO;
8962                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8963                                       pcix_cmd);
8964         }
8965
8966         if (tg3_flag(tp, 5780_CLASS)) {
8967                 /* Chip reset on 5780 will reset the MSI enable bit,
8968                  * so we need to restore it.
8969                  */
8971                 if (tg3_flag(tp, USING_MSI)) {
8972                         u16 ctrl;
8973
8974                         pci_read_config_word(tp->pdev,
8975                                              tp->msi_cap + PCI_MSI_FLAGS,
8976                                              &ctrl);
8977                         pci_write_config_word(tp->pdev,
8978                                               tp->msi_cap + PCI_MSI_FLAGS,
8979                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8980                         val = tr32(MSGINT_MODE);
8981                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8982                 }
8983         }
8984 }
8985
8986 static void tg3_override_clk(struct tg3 *tp)
8987 {
8988         u32 val;
8989
8990         switch (tg3_asic_rev(tp)) {
8991         case ASIC_REV_5717:
8992                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8993                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8994                      TG3_CPMU_MAC_ORIDE_ENABLE);
8995                 break;
8996
8997         case ASIC_REV_5719:
8998         case ASIC_REV_5720:
8999                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9000                 break;
9001
9002         default:
9003                 return;
9004         }
9005 }
9006
9007 static void tg3_restore_clk(struct tg3 *tp)
9008 {
9009         u32 val;
9010
9011         switch (tg3_asic_rev(tp)) {
9012         case ASIC_REV_5717:
9013                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9014                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9015                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9016                 break;
9017
9018         case ASIC_REV_5719:
9019         case ASIC_REV_5720:
9020                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9021                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9022                 break;
9023
9024         default:
9025                 return;
9026         }
9027 }
9028
9029 /* tp->lock is held. */
9030 static int tg3_chip_reset(struct tg3 *tp)
9031         __releases(tp->lock)
9032         __acquires(tp->lock)
9033 {
9034         u32 val;
9035         void (*write_op)(struct tg3 *, u32, u32);
9036         int i, err;
9037
9038         if (!pci_device_is_present(tp->pdev))
9039                 return -ENODEV;
9040
9041         tg3_nvram_lock(tp);
9042
9043         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9044
9045         /* No matching tg3_nvram_unlock() after this because
9046          * the chip reset below will undo the nvram lock.
9047          */
9048         tp->nvram_lock_cnt = 0;
9049
9050         /* GRC_MISC_CFG core clock reset will clear the memory
9051          * enable bit in PCI register 4 and the MSI enable bit
9052          * on some chips, so we save relevant registers here.
9053          */
9054         tg3_save_pci_state(tp);
9055
9056         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9057             tg3_flag(tp, 5755_PLUS))
9058                 tw32(GRC_FASTBOOT_PC, 0);
9059
9060         /*
9061          * We must avoid the readl() that normally takes place.
9062          * It locks machines, causes machine checks, and other
9063          * fun things.  So, temporarily disable the 5701
9064          * hardware workaround, while we do the reset.
9065          */
9066         write_op = tp->write32;
9067         if (write_op == tg3_write_flush_reg32)
9068                 tp->write32 = tg3_write32;
9069
9070         /* Prevent the irq handler from reading or writing PCI registers
9071          * during chip reset when the memory enable bit in the PCI command
9072          * register may be cleared.  The chip does not generate interrupt
9073          * at this time, but the irq handler may still be called due to irq
9074          * sharing or irqpoll.
9075          */
9076         tg3_flag_set(tp, CHIP_RESETTING);
9077         for (i = 0; i < tp->irq_cnt; i++) {
9078                 struct tg3_napi *tnapi = &tp->napi[i];
9079                 if (tnapi->hw_status) {
9080                         tnapi->hw_status->status = 0;
9081                         tnapi->hw_status->status_tag = 0;
9082                 }
9083                 tnapi->last_tag = 0;
9084                 tnapi->last_irq_tag = 0;
9085         }
9086         smp_mb();
9087
9088         tg3_full_unlock(tp);
9089
9090         for (i = 0; i < tp->irq_cnt; i++)
9091                 synchronize_irq(tp->napi[i].irq_vec);
9092
9093         tg3_full_lock(tp, 0);
9094
9095         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9096                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9097                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9098         }
9099
9100         /* do the reset */
9101         val = GRC_MISC_CFG_CORECLK_RESET;
9102
9103         if (tg3_flag(tp, PCI_EXPRESS)) {
9104                 /* Force PCIe 1.0a mode */
9105                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9106                     !tg3_flag(tp, 57765_PLUS) &&
9107                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9108                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9109                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9110
9111                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9112                         tw32(GRC_MISC_CFG, (1 << 29));
9113                         val |= (1 << 29);
9114                 }
9115         }
9116
9117         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9118                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9119                 tw32(GRC_VCPU_EXT_CTRL,
9120                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9121         }
9122
9123         /* Set the clock to the highest frequency to avoid timeouts.  In
9124          * link-aware mode the clock can run slow, and the bootcode may
9125          * not complete within the expected time.  Override the clock so
9126          * the bootcode finishes sooner, then restore it.
9127          */
9128         tg3_override_clk(tp);
9129
9130         /* Manage gphy power for all CPMU absent PCIe devices. */
9131         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9132                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9133
9134         tw32(GRC_MISC_CFG, val);
9135
9136         /* restore 5701 hardware bug workaround write method */
9137         tp->write32 = write_op;
9138
9139         /* Unfortunately, we have to delay before the PCI read back.
9140          * Some 575X chips will not even respond to a PCI cfg access
9141          * when the reset command is given to the chip.
9142          *
9143          * How do these hardware designers expect things to work
9144          * properly if the PCI write is posted for a long period
9145          * of time?  There must always be some method by which a
9146          * register read back can occur to push out the write that
9147          * does the reset.
9148          *
9149          * For most tg3 variants the trick below has worked.
9150          * Ho hum...
9151          */
9152         udelay(120);
9153
9154         /* Flush PCI posted writes.  The normal MMIO registers
9155          * are inaccessible at this time so this is the only
9156          * way to do this reliably (actually, this is no longer
9157          * the case, see above).  I tried to use indirect
9158          * register read/write but this upset some 5701 variants.
9159          */
9160         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9161
9162         udelay(120);
9163
9164         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9165                 u16 val16;
9166
9167                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9168                         int j;
9169                         u32 cfg_val;
9170
9171                         /* Wait for link training to complete.  */
9172                         for (j = 0; j < 5000; j++)
9173                                 udelay(100);
9174
9175                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9176                         pci_write_config_dword(tp->pdev, 0xc4,
9177                                                cfg_val | (1 << 15));
9178                 }
9179
9180                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9181                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9182                 /*
9183                  * Older PCIe devices only support the 128 byte
9184                  * MPS setting.  Enforce the restriction.
9185                  */
9186                 if (!tg3_flag(tp, CPMU_PRESENT))
9187                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9188                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9189
9190                 /* Clear error status */
9191                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9192                                       PCI_EXP_DEVSTA_CED |
9193                                       PCI_EXP_DEVSTA_NFED |
9194                                       PCI_EXP_DEVSTA_FED |
9195                                       PCI_EXP_DEVSTA_URD);
9196         }
9197
9198         tg3_restore_pci_state(tp);
9199
9200         tg3_flag_clear(tp, CHIP_RESETTING);
9201         tg3_flag_clear(tp, ERROR_PROCESSED);
9202
9203         val = 0;
9204         if (tg3_flag(tp, 5780_CLASS))
9205                 val = tr32(MEMARB_MODE);
9206         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9207
9208         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9209                 tg3_stop_fw(tp);
9210                 tw32(0x5000, 0x400);
9211         }
9212
9213         if (tg3_flag(tp, IS_SSB_CORE)) {
9214                 /*
9215                  * BCM4785: In order to avoid repercussions from using
9216                  * a potentially defective internal ROM, stop the Rx RISC
9217                  * CPU, which is not required for normal operation.
9218                  */
9219                 tg3_stop_fw(tp);
9220                 tg3_halt_cpu(tp, RX_CPU_BASE);
9221         }
9222
9223         err = tg3_poll_fw(tp);
9224         if (err)
9225                 return err;
9226
9227         tw32(GRC_MODE, tp->grc_mode);
9228
9229         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9230                 val = tr32(0xc4);
9231
9232                 tw32(0xc4, val | (1 << 15));
9233         }
9234
9235         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9236             tg3_asic_rev(tp) == ASIC_REV_5705) {
9237                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9238                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9239                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9240                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9241         }
9242
9243         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9244                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9245                 val = tp->mac_mode;
9246         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9247                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9248                 val = tp->mac_mode;
9249         } else
9250                 val = 0;
9251
9252         tw32_f(MAC_MODE, val);
9253         udelay(40);
9254
9255         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9256
9257         tg3_mdio_start(tp);
9258
9259         if (tg3_flag(tp, PCI_EXPRESS) &&
9260             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9261             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9262             !tg3_flag(tp, 57765_PLUS)) {
9263                 val = tr32(0x7c00);
9264
9265                 tw32(0x7c00, val | (1 << 25));
9266         }
9267
9268         tg3_restore_clk(tp);
9269
9270         /* Increase the core clock speed to fix tx timeout issue for 5762
9271          * with 100Mbps link speed.
9272          */
9273         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9274                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9275                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9276                      TG3_CPMU_MAC_ORIDE_ENABLE);
9277         }
9278
9279         /* Reprobe ASF enable state.  */
9280         tg3_flag_clear(tp, ENABLE_ASF);
9281         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9282                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9283
9284         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9285         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9286         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9287                 u32 nic_cfg;
9288
9289                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9290                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9291                         tg3_flag_set(tp, ENABLE_ASF);
9292                         tp->last_event_jiffies = jiffies;
9293                         if (tg3_flag(tp, 5750_PLUS))
9294                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9295
9296                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9297                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9298                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9299                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9300                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9301                 }
9302         }
9303
9304         return 0;
9305 }
9306
9307 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9308 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9309 static void __tg3_set_rx_mode(struct net_device *);
9310
9311 /* tp->lock is held. */
9312 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9313 {
9314         int err;
9315
9316         tg3_stop_fw(tp);
9317
9318         tg3_write_sig_pre_reset(tp, kind);
9319
9320         tg3_abort_hw(tp, silent);
9321         err = tg3_chip_reset(tp);
9322
9323         __tg3_set_mac_addr(tp, false);
9324
9325         tg3_write_sig_legacy(tp, kind);
9326         tg3_write_sig_post_reset(tp, kind);
9327
9328         if (tp->hw_stats) {
9329                 /* Save the stats across chip resets... */
9330                 tg3_get_nstats(tp, &tp->net_stats_prev);
9331                 tg3_get_estats(tp, &tp->estats_prev);
9332
9333                 /* And make sure the next sample is new data */
9334                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9335         }
9336
9337         return err;
9338 }
9339
9340 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9341 {
9342         struct tg3 *tp = netdev_priv(dev);
9343         struct sockaddr *addr = p;
9344         int err = 0;
9345         bool skip_mac_1 = false;
9346
9347         if (!is_valid_ether_addr(addr->sa_data))
9348                 return -EADDRNOTAVAIL;
9349
9350         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9351
9352         if (!netif_running(dev))
9353                 return 0;
9354
9355         if (tg3_flag(tp, ENABLE_ASF)) {
9356                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9357
9358                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9359                 addr0_low = tr32(MAC_ADDR_0_LOW);
9360                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9361                 addr1_low = tr32(MAC_ADDR_1_LOW);
9362
9363                 /* Skip MAC addr 1 if ASF is using it. */
9364                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9365                     !(addr1_high == 0 && addr1_low == 0))
9366                         skip_mac_1 = true;
9367         }
9368         spin_lock_bh(&tp->lock);
9369         __tg3_set_mac_addr(tp, skip_mac_1);
9370         __tg3_set_rx_mode(dev);
9371         spin_unlock_bh(&tp->lock);
9372
9373         return err;
9374 }
9375
9376 /* tp->lock is held. */
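/* Each BD ring control block in NIC SRAM has a small fixed layout:
 * a 64-bit host DMA address (high word written first), a
 * maxlen/flags word, and, on pre-5705 chips only, a NIC-local
 * buffer address.
 */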
9377 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9378                            dma_addr_t mapping, u32 maxlen_flags,
9379                            u32 nic_addr)
9380 {
9381         tg3_write_mem(tp,
9382                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9383                       ((u64) mapping >> 32));
9384         tg3_write_mem(tp,
9385                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9386                       ((u64) mapping & 0xffffffff));
9387         tg3_write_mem(tp,
9388                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9389                        maxlen_flags);
9390
9391         if (!tg3_flag(tp, 5705_PLUS))
9392                 tg3_write_mem(tp,
9393                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9394                               nic_addr);
9395 }
9396
9398 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9399 {
9400         int i = 0;
9401
9402         if (!tg3_flag(tp, ENABLE_TSS)) {
9403                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9404                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9405                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9406         } else {
9407                 tw32(HOSTCC_TXCOL_TICKS, 0);
9408                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9409                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9410
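                /* Per-vector coalescing registers sit in 0x18-byte
                 * blocks: vector i's TXCOL_TICKS register is at
                 * HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, and the other
                 * two registers written below follow the same stride.
                 */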
9411                 for (; i < tp->txq_cnt; i++) {
9412                         u32 reg;
9413
9414                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9415                         tw32(reg, ec->tx_coalesce_usecs);
9416                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9417                         tw32(reg, ec->tx_max_coalesced_frames);
9418                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9420                 }
9421         }
9422
9423         for (; i < tp->irq_max - 1; i++) {
9424                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9425                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9426                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9427         }
9428 }
9429
9430 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9431 {
9432         int i = 0;
9433         u32 limit = tp->rxq_cnt;
9434
9435         if (!tg3_flag(tp, ENABLE_RSS)) {
9436                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9437                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9438                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9439                 limit--;
9440         } else {
9441                 tw32(HOSTCC_RXCOL_TICKS, 0);
9442                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9443                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9444         }
9445
9446         for (; i < limit; i++) {
9447                 u32 reg;
9448
9449                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9450                 tw32(reg, ec->rx_coalesce_usecs);
9451                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9452                 tw32(reg, ec->rx_max_coalesced_frames);
9453                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9455         }
9456
9457         for (; i < tp->irq_max - 1; i++) {
9458                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9459                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9460                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9461         }
9462 }
9463
9464 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9465 {
9466         tg3_coal_tx_init(tp, ec);
9467         tg3_coal_rx_init(tp, ec);
9468
9469         if (!tg3_flag(tp, 5705_PLUS)) {
9470                 u32 val = ec->stats_block_coalesce_usecs;
9471
9472                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9473                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9474
9475                 if (!tp->link_up)
9476                         val = 0;
9477
9478                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9479         }
9480 }
9481
9482 /* tp->lock is held. */
9483 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9484 {
9485         u32 txrcb, limit;
9486
9487         /* Disable all transmit rings but the first. */
9488         if (!tg3_flag(tp, 5705_PLUS))
9489                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9490         else if (tg3_flag(tp, 5717_PLUS))
9491                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9492         else if (tg3_flag(tp, 57765_CLASS) ||
9493                  tg3_asic_rev(tp) == ASIC_REV_5762)
9494                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9495         else
9496                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9497
9498         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9500                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9501                               BDINFO_FLAGS_DISABLED);
9502 }
9503
9504 /* tp->lock is held. */
9505 static void tg3_tx_rcbs_init(struct tg3 *tp)
9506 {
9507         int i = 0;
9508         u32 txrcb = NIC_SRAM_SEND_RCB;
9509
9510         if (tg3_flag(tp, ENABLE_TSS))
9511                 i++;
9512
9513         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9514                 struct tg3_napi *tnapi = &tp->napi[i];
9515
9516                 if (!tnapi->tx_ring)
9517                         continue;
9518
9519                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9520                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9521                                NIC_SRAM_TX_BUFFER_DESC);
9522         }
9523 }
9524
9525 /* tp->lock is held. */
9526 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9527 {
9528         u32 rxrcb, limit;
9529
9530         /* Disable all receive return rings but the first. */
9531         if (tg3_flag(tp, 5717_PLUS))
9532                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9533         else if (!tg3_flag(tp, 5705_PLUS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9535         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9536                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9537                  tg3_flag(tp, 57765_CLASS))
9538                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9539         else
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9541
9542         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9544                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9545                               BDINFO_FLAGS_DISABLED);
9546 }
9547
9548 /* tp->lock is held. */
9549 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9550 {
9551         int i = 0;
9552         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9553
9554         if (tg3_flag(tp, ENABLE_RSS))
9555                 i++;
9556
9557         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9558                 struct tg3_napi *tnapi = &tp->napi[i];
9559
9560                 if (!tnapi->rx_rcb)
9561                         continue;
9562
9563                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9564                                (tp->rx_ret_ring_mask + 1) <<
9565                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9566         }
9567 }
9568
9569 /* tp->lock is held. */
9570 static void tg3_rings_reset(struct tg3 *tp)
9571 {
9572         int i;
9573         u32 stblk;
9574         struct tg3_napi *tnapi = &tp->napi[0];
9575
9576         tg3_tx_rcbs_disable(tp);
9577
9578         tg3_rx_ret_rcbs_disable(tp);
9579
9580         /* Disable interrupts */
9581         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9582         tp->napi[0].chk_msi_cnt = 0;
9583         tp->napi[0].last_rx_cons = 0;
9584         tp->napi[0].last_tx_cons = 0;
9585
9586         /* Zero mailbox registers. */
9587         if (tg3_flag(tp, SUPPORT_MSIX)) {
9588                 for (i = 1; i < tp->irq_max; i++) {
9589                         tp->napi[i].tx_prod = 0;
9590                         tp->napi[i].tx_cons = 0;
9591                         if (tg3_flag(tp, ENABLE_TSS))
9592                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9593                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9594                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9595                         tp->napi[i].chk_msi_cnt = 0;
9596                         tp->napi[i].last_rx_cons = 0;
9597                         tp->napi[i].last_tx_cons = 0;
9598                 }
9599                 if (!tg3_flag(tp, ENABLE_TSS))
9600                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9601         } else {
9602                 tp->napi[0].tx_prod = 0;
9603                 tp->napi[0].tx_cons = 0;
9604                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9605                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9606         }
9607
9608         /* Make sure the NIC-based send BD rings are disabled. */
9609         if (!tg3_flag(tp, 5705_PLUS)) {
9610                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9611                 for (i = 0; i < 16; i++)
9612                         tw32_tx_mbox(mbox + i * 8, 0);
9613         }
9614
9615         /* Clear status block in ram. */
9616         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9617
9618         /* Set status block DMA address */
9619         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9620              ((u64) tnapi->status_mapping >> 32));
9621         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9622              ((u64) tnapi->status_mapping & 0xffffffff));
9623
9624         stblk = HOSTCC_STATBLCK_RING1;
9625
9626         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9627                 u64 mapping = (u64)tnapi->status_mapping;
9628                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9629                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9630                 stblk += 8;
9631
9632                 /* Clear status block in ram. */
9633                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9634         }
9635
9636         tg3_tx_rcbs_init(tp);
9637         tg3_rx_ret_rcbs_init(tp);
9638 }
9639
9640 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9641 {
9642         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9643
9644         if (!tg3_flag(tp, 5750_PLUS) ||
9645             tg3_flag(tp, 5780_CLASS) ||
9646             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9647             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9648             tg3_flag(tp, 57765_PLUS))
9649                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9650         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9651                  tg3_asic_rev(tp) == ASIC_REV_5787)
9652                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9653         else
9654                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9655
9656         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9657         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9658
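        /* Worked example (illustrative, assuming the default
         * rx_pending of 200): host_rep_thresh = 200 / 8 = 25, so the
         * standard-ring threshold written below becomes
         * min(nic_rep_thresh, 25).
         */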
9659         val = min(nic_rep_thresh, host_rep_thresh);
9660         tw32(RCVBDI_STD_THRESH, val);
9661
9662         if (tg3_flag(tp, 57765_PLUS))
9663                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9664
9665         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9666                 return;
9667
9668         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9669
9670         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9671
9672         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9673         tw32(RCVBDI_JUMBO_THRESH, val);
9674
9675         if (tg3_flag(tp, 57765_PLUS))
9676                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9677 }
9678
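/* Standard bit-reflected CRC-32 as used by Ethernet (polynomial
 * 0x04C11DB7, reflected form 0xedb88320), processed one byte at a
 * time, LSB first.  __tg3_set_rx_mode() derives a 7-bit multicast
 * hash from the result.
 */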
9679 static inline u32 calc_crc(unsigned char *buf, int len)
9680 {
9681         u32 reg;
9682         u32 tmp;
9683         int j, k;
9684
9685         reg = 0xffffffff;
9686
9687         for (j = 0; j < len; j++) {
9688                 reg ^= buf[j];
9689
9690                 for (k = 0; k < 8; k++) {
9691                         tmp = reg & 0x01;
9692
9693                         reg >>= 1;
9694
9695                         if (tmp)
9696                                 reg ^= 0xedb88320;
9697                 }
9698         }
9699
9700         return ~reg;
9701 }
9702
9703 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9704 {
9705         /* accept or reject all multicast frames */
9706         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9707         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9708         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9709         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9710 }
9711
9712 static void __tg3_set_rx_mode(struct net_device *dev)
9713 {
9714         struct tg3 *tp = netdev_priv(dev);
9715         u32 rx_mode;
9716
9717         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9718                                   RX_MODE_KEEP_VLAN_TAG);
9719
9720 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9721         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9722          * flag clear.
9723          */
9724         if (!tg3_flag(tp, ENABLE_ASF))
9725                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9726 #endif
9727
9728         if (dev->flags & IFF_PROMISC) {
9729                 /* Promiscuous mode. */
9730                 rx_mode |= RX_MODE_PROMISC;
9731         } else if (dev->flags & IFF_ALLMULTI) {
9732                 /* Accept all multicast. */
9733                 tg3_set_multi(tp, 1);
9734         } else if (netdev_mc_empty(dev)) {
9735                 /* Reject all multicast. */
9736                 tg3_set_multi(tp, 0);
9737         } else {
9738                 /* Accept one or more multicast(s). */
9739                 struct netdev_hw_addr *ha;
9740                 u32 mc_filter[4] = { 0, };
9741                 u32 regidx;
9742                 u32 bit;
9743                 u32 crc;
9744
9745                 netdev_for_each_mc_addr(ha, dev) {
9746                         crc = calc_crc(ha->addr, ETH_ALEN);
9747                         bit = ~crc & 0x7f;
9748                         regidx = (bit & 0x60) >> 5;
9749                         bit &= 0x1f;
9750                         mc_filter[regidx] |= (1 << bit);
9751                 }
9752
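                /* The four MAC_HASH_REG registers form one 128-bit
                 * filter.  Example (illustrative): a 7-bit hash of
                 * 0x4a (0b1001010) selects register 2 (top two bits,
                 * 0b10) and bit 10 (low five bits, 0b01010) within it.
                 */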
9753                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9754                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9755                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9756                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9757         }
9758
9759         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9760                 rx_mode |= RX_MODE_PROMISC;
9761         } else if (!(dev->flags & IFF_PROMISC)) {
9762                 /* Add all entries into the mac addr filter list */
9763                 int i = 0;
9764                 struct netdev_hw_addr *ha;
9765
9766                 netdev_for_each_uc_addr(ha, dev) {
9767                         __tg3_set_one_mac_addr(tp, ha->addr,
9768                                                i + TG3_UCAST_ADDR_IDX(tp));
9769                         i++;
9770                 }
9771         }
9772
9773         if (rx_mode != tp->rx_mode) {
9774                 tp->rx_mode = rx_mode;
9775                 tw32_f(MAC_RX_MODE, rx_mode);
9776                 udelay(10);
9777         }
9778 }
9779
9780 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9781 {
9782         int i;
9783
9784         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9785                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9786 }
9787
9788 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9789 {
9790         int i;
9791
9792         if (!tg3_flag(tp, SUPPORT_MSIX))
9793                 return;
9794
9795         if (tp->rxq_cnt == 1) {
9796                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9797                 return;
9798         }
9799
9800         /* Validate table against current IRQ count */
9801         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9802                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9803                         break;
9804         }
9805
9806         if (i != TG3_RSS_INDIR_TBL_SIZE)
9807                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9808 }
9809
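/* Each 32-bit MAC_RSS_INDIR_TBL register packs eight 4-bit entries,
 * first entry in the most significant nibble.  Example (illustrative):
 * entries {1,0,3,2,1,0,3,2} are written as 0x10321032.
 */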
9810 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9811 {
9812         int i = 0;
9813         u32 reg = MAC_RSS_INDIR_TBL_0;
9814
9815         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9816                 u32 val = tp->rss_ind_tbl[i];
9817                 i++;
9818                 for (; i % 8; i++) {
9819                         val <<= 4;
9820                         val |= tp->rss_ind_tbl[i];
9821                 }
9822                 tw32(reg, val);
9823                 reg += 4;
9824         }
9825 }
9826
9827 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9828 {
9829         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9830                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9831         else
9832                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9833 }
9834
9835 /* tp->lock is held. */
9836 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9837 {
9838         u32 val, rdmac_mode;
9839         int i, err, limit;
9840         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9841
9842         tg3_disable_ints(tp);
9843
9844         tg3_stop_fw(tp);
9845
9846         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9847
9848         if (tg3_flag(tp, INIT_COMPLETE))
9849                 tg3_abort_hw(tp, true);
9850
9851         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9852             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9853                 tg3_phy_pull_config(tp);
9854                 tg3_eee_pull_config(tp, NULL);
9855                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9856         }
9857
9858         /* Enable MAC control of LPI */
9859         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9860                 tg3_setup_eee(tp);
9861
9862         if (reset_phy)
9863                 tg3_phy_reset(tp);
9864
9865         err = tg3_chip_reset(tp);
9866         if (err)
9867                 return err;
9868
9869         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9870
9871         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9872                 val = tr32(TG3_CPMU_CTRL);
9873                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9874                 tw32(TG3_CPMU_CTRL, val);
9875
9876                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9877                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9878                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9879                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9880
9881                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9882                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9883                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9884                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9885
9886                 val = tr32(TG3_CPMU_HST_ACC);
9887                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9888                 val |= CPMU_HST_ACC_MACCLK_6_25;
9889                 tw32(TG3_CPMU_HST_ACC, val);
9890         }
9891
9892         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9893                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9894                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9895                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9896                 tw32(PCIE_PWR_MGMT_THRESH, val);
9897
9898                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9899                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9900
9901                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9902
9903                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9904                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9905         }
9906
9907         if (tg3_flag(tp, L1PLLPD_EN)) {
9908                 u32 grc_mode = tr32(GRC_MODE);
9909
9910                 /* Access the lower 1K of PL PCIE block registers. */
9911                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9912                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9913
9914                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9915                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9916                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9917
9918                 tw32(GRC_MODE, grc_mode);
9919         }
9920
9921         if (tg3_flag(tp, 57765_CLASS)) {
9922                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9923                         u32 grc_mode = tr32(GRC_MODE);
9924
9925                         /* Access the lower 1K of PL PCIE block registers. */
9926                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9927                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9928
9929                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9930                                    TG3_PCIE_PL_LO_PHYCTL5);
9931                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9932                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9933
9934                         tw32(GRC_MODE, grc_mode);
9935                 }
9936
9937                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9938                         u32 grc_mode;
9939
9940                         /* Fix transmit hangs */
9941                         val = tr32(TG3_CPMU_PADRNG_CTL);
9942                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9943                         tw32(TG3_CPMU_PADRNG_CTL, val);
9944
9945                         grc_mode = tr32(GRC_MODE);
9946
9947                         /* Access the lower 1K of DL PCIE block registers. */
9948                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9949                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9950
9951                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9952                                    TG3_PCIE_DL_LO_FTSMAX);
9953                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9954                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9955                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9956
9957                         tw32(GRC_MODE, grc_mode);
9958                 }
9959
9960                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9961                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9962                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9963                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9964         }
9965
9966         /* This works around an issue with Athlon chipsets on
9967          * B3 tigon3 silicon.  This bit has no effect on any
9968          * other revision.  But do not set this on PCI Express
9969          * chips and don't even touch the clocks if the CPMU is present.
9970          */
9971         if (!tg3_flag(tp, CPMU_PRESENT)) {
9972                 if (!tg3_flag(tp, PCI_EXPRESS))
9973                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9974                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9975         }
9976
9977         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9978             tg3_flag(tp, PCIX_MODE)) {
9979                 val = tr32(TG3PCI_PCISTATE);
9980                 val |= PCISTATE_RETRY_SAME_DMA;
9981                 tw32(TG3PCI_PCISTATE, val);
9982         }
9983
9984         if (tg3_flag(tp, ENABLE_APE)) {
9985                 /* Allow reads and writes to the
9986                  * APE register and memory space.
9987                  */
9988                 val = tr32(TG3PCI_PCISTATE);
9989                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9990                        PCISTATE_ALLOW_APE_SHMEM_WR |
9991                        PCISTATE_ALLOW_APE_PSPACE_WR;
9992                 tw32(TG3PCI_PCISTATE, val);
9993         }
9994
9995         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9996                 /* Enable some hw fixes.  */
9997                 val = tr32(TG3PCI_MSI_DATA);
9998                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9999                 tw32(TG3PCI_MSI_DATA, val);
10000         }
10001
10002         /* Descriptor ring init may make accesses to the
10003          * NIC SRAM area to set up the TX descriptors, so we
10004          * can only do this after the hardware has been
10005          * successfully reset.
10006          */
10007         err = tg3_init_rings(tp);
10008         if (err)
10009                 return err;
10010
10011         if (tg3_flag(tp, 57765_PLUS)) {
10012                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10013                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10014                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10015                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10016                 if (!tg3_flag(tp, 57765_CLASS) &&
10017                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10018                     tg3_asic_rev(tp) != ASIC_REV_5762)
10019                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10020                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10021         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10022                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10023                 /* This value is determined during the probe-time DMA
10024                  * engine test, tg3_test_dma.
10025                  */
10026                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10027         }
10028
10029         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10030                           GRC_MODE_4X_NIC_SEND_RINGS |
10031                           GRC_MODE_NO_TX_PHDR_CSUM |
10032                           GRC_MODE_NO_RX_PHDR_CSUM);
10033         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10034
10035         /* Pseudo-header checksum is done by hardware logic and not
10036          * the offload processors, so make the chip do the pseudo-
10037          * header checksums on receive.  For transmit it is more
10038          * convenient to do the pseudo-header checksum in software
10039          * as Linux does that on transmit for us in all cases.
10040          */
10041         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
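              /* Editor's illustration (a sketch, not part of the original
               * driver): on transmit the stack typically seeds the TCP
               * checksum with the pseudo-header sum before the skb reaches
               * the driver, roughly
               *
               *      th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
               *                                     tcp_len, IPPROTO_TCP, 0);
               *
               * so the MAC only folds in the payload, which is why it is
               * safe to leave GRC_MODE_NO_TX_PHDR_CSUM set here.
               */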
10042
10043         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10044         if (tp->rxptpctl)
10045                 tw32(TG3_RX_PTP_CTL,
10046                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10047
10048         if (tg3_flag(tp, PTP_CAPABLE))
10049                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10050
10051         tw32(GRC_MODE, tp->grc_mode | val);
10052
10053         /* On one of the AMD platforms, MRRS is restricted to 4000 because
10054          * of a south bridge limitation. As a workaround, the driver sets
10055          * MRRS to 2048 instead of the default 4096.
10056          */
10057         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10058             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10059                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10060                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10061         }
10062
10063         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10064         val = tr32(GRC_MISC_CFG);
10065         val &= ~0xff;
10066         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10067         tw32(GRC_MISC_CFG, val);
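              /* Editor's note: assuming the prescaler divides by N + 1, the
               * value 65 written above yields 66 MHz / (65 + 1) = 1 MHz,
               * i.e. a 1 usec timer tick.
               */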
10068
10069         /* Initialize MBUF/DESC pool. */
10070         if (tg3_flag(tp, 5750_PLUS)) {
10071                 /* Do nothing.  */
10072         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10073                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10074                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10075                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10076                 else
10077                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10078                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10079                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10080         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10081                 int fw_len;
10082
10083                 fw_len = tp->fw_len;
10084         fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); /* round up to 128 B */
10085                 tw32(BUFMGR_MB_POOL_ADDR,
10086                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10087                 tw32(BUFMGR_MB_POOL_SIZE,
10088                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10089         }
10090
10091         if (tp->dev->mtu <= ETH_DATA_LEN) {
10092                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10093                      tp->bufmgr_config.mbuf_read_dma_low_water);
10094                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10095                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10096                 tw32(BUFMGR_MB_HIGH_WATER,
10097                      tp->bufmgr_config.mbuf_high_water);
10098         } else {
10099                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10100                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10101                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10102                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10103                 tw32(BUFMGR_MB_HIGH_WATER,
10104                      tp->bufmgr_config.mbuf_high_water_jumbo);
10105         }
10106         tw32(BUFMGR_DMA_LOW_WATER,
10107              tp->bufmgr_config.dma_low_water);
10108         tw32(BUFMGR_DMA_HIGH_WATER,
10109              tp->bufmgr_config.dma_high_water);
10110
10111         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10112         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10113                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10114         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10115             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10116             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10117             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10118                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10119         tw32(BUFMGR_MODE, val);
10120         for (i = 0; i < 2000; i++) {
10121                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10122                         break;
10123                 udelay(10);
10124         }
10125         if (i >= 2000) {
10126                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10127                 return -ENODEV;
10128         }
10129
10130         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10131                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10132
10133         tg3_setup_rxbd_thresholds(tp);
10134
10135         /* Initialize TG3_BDINFO's at:
10136          *  RCVDBDI_STD_BD:     standard eth size rx ring
10137          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10138          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10139          *
10140          * like so:
10141          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10142          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10143          *                              ring attribute flags
10144          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10145          *
10146          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10147          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10148          *
10149          * The size of each ring is fixed in the firmware, but the location is
10150          * configurable.
10151          */
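              /* Editor's sketch (hypothetical layout, names invented; the
               * real offsets are the TG3_BDINFO_* constants used below):
               *
               *      struct tg3_bdinfo_sketch {
               *              u32 host_addr_hi;    - high half of ring DMA address
               *              u32 host_addr_lo;    - low half of ring DMA address
               *              u32 maxlen_flags;    - (rx max buffer size << 16) | flags
               *              u32 nic_addr;        - descriptor location in NIC SRAM
               *      };
               *
               * One such block lives at each RCVDBDI_*_BD register offset.
               */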
10152         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10153              ((u64) tpr->rx_std_mapping >> 32));
10154         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10155              ((u64) tpr->rx_std_mapping & 0xffffffff));
10156         if (!tg3_flag(tp, 5717_PLUS))
10157                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10158                      NIC_SRAM_RX_BUFFER_DESC);
10159
10160         /* Disable the mini ring */
10161         if (!tg3_flag(tp, 5705_PLUS))
10162                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10163                      BDINFO_FLAGS_DISABLED);
10164
10165         /* Program the jumbo buffer descriptor ring control
10166          * blocks on those devices that have them.
10167          */
10168         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10169             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10170
10171                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10172                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10173                              ((u64) tpr->rx_jmb_mapping >> 32));
10174                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10175                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10176                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10177                               BDINFO_FLAGS_MAXLEN_SHIFT;
10178                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10179                              val | BDINFO_FLAGS_USE_EXT_RECV);
10180                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10181                             tg3_flag(tp, 57765_CLASS) ||
10182                             tg3_asic_rev(tp) == ASIC_REV_5762)
10183                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10184                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10185                 } else {
10186                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10187                              BDINFO_FLAGS_DISABLED);
10188                 }
10189
10190                 if (tg3_flag(tp, 57765_PLUS)) {
10191                         val = TG3_RX_STD_RING_SIZE(tp);
10192                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10193                         val |= (TG3_RX_STD_DMA_SZ << 2);
10194                 } else
10195                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10196         } else
10197                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10198
10199         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10200
10201         tpr->rx_std_prod_idx = tp->rx_pending;
10202         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10203
10204         tpr->rx_jmb_prod_idx =
10205                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10206         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10207
10208         tg3_rings_reset(tp);
10209
10210         /* Initialize MAC address and backoff seed. */
10211         __tg3_set_mac_addr(tp, false);
10212
10213         /* MTU + ethernet header + FCS + optional VLAN tag */
10214         tw32(MAC_RX_MTU_SIZE,
10215              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
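              /* Editor's example: with the default MTU of 1500 this programs
               * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
               * bytes as the largest frame the MAC will accept.
               */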
10216
10217         /* The slot time is changed by tg3_setup_phy if we
10218          * run at gigabit with half duplex.
10219          */
10220         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10221               (6 << TX_LENGTHS_IPG_SHIFT) |
10222               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10223
10224         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10225             tg3_asic_rev(tp) == ASIC_REV_5762)
10226                 val |= tr32(MAC_TX_LENGTHS) &
10227                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10228                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10229
10230         tw32(MAC_TX_LENGTHS, val);
10231
10232         /* Receive rules. */
10233         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10234         tw32(RCVLPC_CONFIG, 0x0181);
10235
10236         /* Calculate the RDMAC_MODE setting early; we need it to determine
10237          * the RCVLPC_STATE_ENABLE mask.
10238          */
10239         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10240                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10241                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10242                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10243                       RDMAC_MODE_LNGREAD_ENAB);
10244
10245         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10246                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10247
10248         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10249             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10250             tg3_asic_rev(tp) == ASIC_REV_57780)
10251                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10252                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10253                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10254
10255         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10256             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10257                 if (tg3_flag(tp, TSO_CAPABLE) &&
10258                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10259                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10260                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10261                            !tg3_flag(tp, IS_5788)) {
10262                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10263                 }
10264         }
10265
10266         if (tg3_flag(tp, PCI_EXPRESS))
10267                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10268
10269         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10270                 tp->dma_limit = 0;
10271                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10272                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10273                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10274                 }
10275         }
10276
10277         if (tg3_flag(tp, HW_TSO_1) ||
10278             tg3_flag(tp, HW_TSO_2) ||
10279             tg3_flag(tp, HW_TSO_3))
10280                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10281
10282         if (tg3_flag(tp, 57765_PLUS) ||
10283             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10284             tg3_asic_rev(tp) == ASIC_REV_57780)
10285                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10286
10287         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10288             tg3_asic_rev(tp) == ASIC_REV_5762)
10289                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10290
10291         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10292             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10293             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10294             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10295             tg3_flag(tp, 57765_PLUS)) {
10296                 u32 tgtreg;
10297
10298                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10299                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10300                 else
10301                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10302
10303                 val = tr32(tgtreg);
10304                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10305                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10306                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10307                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10308                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10309                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10310                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10311                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10312                 }
10313                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10314         }
10315
10316         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10317             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10318             tg3_asic_rev(tp) == ASIC_REV_5762) {
10319                 u32 tgtreg;
10320
10321                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10322                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10323                 else
10324                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10325
10326                 val = tr32(tgtreg);
10327                 tw32(tgtreg, val |
10328                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10329                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10330         }
10331
10332         /* Receive/send statistics. */
10333         if (tg3_flag(tp, 5750_PLUS)) {
10334                 val = tr32(RCVLPC_STATS_ENABLE);
10335                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10336                 tw32(RCVLPC_STATS_ENABLE, val);
10337         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10338                    tg3_flag(tp, TSO_CAPABLE)) {
10339                 val = tr32(RCVLPC_STATS_ENABLE);
10340                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10341                 tw32(RCVLPC_STATS_ENABLE, val);
10342         } else {
10343                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10344         }
10345         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10346         tw32(SNDDATAI_STATSENAB, 0xffffff);
10347         tw32(SNDDATAI_STATSCTRL,
10348              (SNDDATAI_SCTRL_ENABLE |
10349               SNDDATAI_SCTRL_FASTUPD));
10350
10351         /* Setup host coalescing engine. */
10352         tw32(HOSTCC_MODE, 0);
10353         for (i = 0; i < 2000; i++) {
10354                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10355                         break;
10356                 udelay(10);
10357         }
10358
10359         __tg3_set_coalesce(tp, &tp->coal);
10360
10361         if (!tg3_flag(tp, 5705_PLUS)) {
10362                 /* Status/statistics block address.  See tg3_timer,
10363                  * the tg3_periodic_fetch_stats call there, and
10364                  * tg3_get_stats to see how this works for 5705/5750 chips.
10365                  */
10366                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10367                      ((u64) tp->stats_mapping >> 32));
10368                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10369                      ((u64) tp->stats_mapping & 0xffffffff));
10370                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10371
10372                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10373
10374                 /* Clear statistics and status block memory areas */
10375                 for (i = NIC_SRAM_STATS_BLK;
10376                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10377                      i += sizeof(u32)) {
10378                         tg3_write_mem(tp, i, 0);
10379                         udelay(40);
10380                 }
10381         }
10382
10383         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10384
10385         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10386         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10387         if (!tg3_flag(tp, 5705_PLUS))
10388                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10389
10390         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10391                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10392                 /* Reset to avoid intermittently losing the first rx packet. */
10393                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10394                 udelay(10);
10395         }
10396
10397         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10398                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10399                         MAC_MODE_FHDE_ENABLE;
10400         if (tg3_flag(tp, ENABLE_APE))
10401                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10402         if (!tg3_flag(tp, 5705_PLUS) &&
10403             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10404             tg3_asic_rev(tp) != ASIC_REV_5700)
10405                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10406         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10407         udelay(40);
10408
10409         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10410          * If TG3_FLAG_IS_NIC is zero, we should read the
10411          * register to preserve the GPIO settings for LOMs. The GPIOs,
10412          * whether used as inputs or outputs, are set by boot code after
10413          * reset.
10414          */
10415         if (!tg3_flag(tp, IS_NIC)) {
10416                 u32 gpio_mask;
10417
10418                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10419                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10420                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10421
10422                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10423                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10424                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10425
10426                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10427                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10428
10429                 tp->grc_local_ctrl &= ~gpio_mask;
10430                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10431
10432                 /* GPIO1 must be driven high for eeprom write protect */
10433                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10434                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10435                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10436         }
10437         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10438         udelay(100);
10439
10440         if (tg3_flag(tp, USING_MSIX)) {
10441                 val = tr32(MSGINT_MODE);
10442                 val |= MSGINT_MODE_ENABLE;
10443                 if (tp->irq_cnt > 1)
10444                         val |= MSGINT_MODE_MULTIVEC_EN;
10445                 if (!tg3_flag(tp, 1SHOT_MSI))
10446                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10447                 tw32(MSGINT_MODE, val);
10448         }
10449
10450         if (!tg3_flag(tp, 5705_PLUS)) {
10451                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10452                 udelay(40);
10453         }
10454
10455         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10456                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10457                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10458                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10459                WDMAC_MODE_LNGREAD_ENAB);
10460
10461         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10462             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10463                 if (tg3_flag(tp, TSO_CAPABLE) &&
10464                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10465                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10466                         /* nothing */
10467                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10468                            !tg3_flag(tp, IS_5788)) {
10469                         val |= WDMAC_MODE_RX_ACCEL;
10470                 }
10471         }
10472
10473         /* Enable host coalescing bug fix */
10474         if (tg3_flag(tp, 5755_PLUS))
10475                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10476
10477         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10478                 val |= WDMAC_MODE_BURST_ALL_DATA;
10479
10480         tw32_f(WDMAC_MODE, val);
10481         udelay(40);
10482
10483         if (tg3_flag(tp, PCIX_MODE)) {
10484                 u16 pcix_cmd;
10485
10486                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10487                                      &pcix_cmd);
10488                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10489                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10490                         pcix_cmd |= PCI_X_CMD_READ_2K;
10491                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10492                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10493                         pcix_cmd |= PCI_X_CMD_READ_2K;
10494                 }
10495                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10496                                       pcix_cmd);
10497         }
10498
10499         tw32_f(RDMAC_MODE, rdmac_mode);
10500         udelay(40);
10501
10502         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10503             tg3_asic_rev(tp) == ASIC_REV_5720) {
10504                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10505                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10506                                 break;
10507                 }
10508                 if (i < TG3_NUM_RDMA_CHANNELS) {
10509                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10510                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10511                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10512                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10513                 }
10514         }
10515
10516         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10517         if (!tg3_flag(tp, 5705_PLUS))
10518                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10519
10520         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10521                 tw32(SNDDATAC_MODE,
10522                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10523         else
10524                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10525
10526         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10527         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10528         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10529         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10530                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10531         tw32(RCVDBDI_MODE, val);
10532         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10533         if (tg3_flag(tp, HW_TSO_1) ||
10534             tg3_flag(tp, HW_TSO_2) ||
10535             tg3_flag(tp, HW_TSO_3))
10536                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10537         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10538         if (tg3_flag(tp, ENABLE_TSS))
10539                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10540         tw32(SNDBDI_MODE, val);
10541         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10542
10543         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10544                 err = tg3_load_5701_a0_firmware_fix(tp);
10545                 if (err)
10546                         return err;
10547         }
10548
10549         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10550                 /* Ignore any errors from the firmware download. If the
10551                  * download fails, the device will operate with EEE disabled.
10552                  */
10553                 tg3_load_57766_firmware(tp);
10554         }
10555
10556         if (tg3_flag(tp, TSO_CAPABLE)) {
10557                 err = tg3_load_tso_firmware(tp);
10558                 if (err)
10559                         return err;
10560         }
10561
10562         tp->tx_mode = TX_MODE_ENABLE;
10563
10564         if (tg3_flag(tp, 5755_PLUS) ||
10565             tg3_asic_rev(tp) == ASIC_REV_5906)
10566                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10567
10568         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10569             tg3_asic_rev(tp) == ASIC_REV_5762) {
10570                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10571                 tp->tx_mode &= ~val;
10572                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10573         }
10574
10575         tw32_f(MAC_TX_MODE, tp->tx_mode);
10576         udelay(100);
10577
10578         if (tg3_flag(tp, ENABLE_RSS)) {
10579                 u32 rss_key[10];
10580
10581                 tg3_rss_write_indir_tbl(tp);
10582
10583                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10584
10585                 for (i = 0; i < 10; i++)
10586                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10587         }
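              /* Editor's note: 10 * sizeof(u32) = 40 bytes, the conventional
               * RSS (Toeplitz) hash key size; the loop above spreads that key
               * across the ten consecutive 32-bit MAC_RSS_HASH_KEY registers.
               */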
10588
10589         tp->rx_mode = RX_MODE_ENABLE;
10590         if (tg3_flag(tp, 5755_PLUS))
10591                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10592
10593         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10594                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10595
10596         if (tg3_flag(tp, ENABLE_RSS))
10597                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10598                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10599                                RX_MODE_RSS_IPV6_HASH_EN |
10600                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10601                                RX_MODE_RSS_IPV4_HASH_EN |
10602                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10603
10604         tw32_f(MAC_RX_MODE, tp->rx_mode);
10605         udelay(10);
10606
10607         tw32(MAC_LED_CTRL, tp->led_ctrl);
10608
10609         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10610         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10611                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10612                 udelay(10);
10613         }
10614         tw32_f(MAC_RX_MODE, tp->rx_mode);
10615         udelay(10);
10616
10617         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10618                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10619                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10620                         /* Set the drive transmission level to 1.2V, but
10621                          * only if the signal pre-emphasis bit is not set. */
10622                         val = tr32(MAC_SERDES_CFG);
10623                         val &= 0xfffff000;
10624                         val |= 0x880;
10625                         tw32(MAC_SERDES_CFG, val);
10626                 }
10627                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10628                         tw32(MAC_SERDES_CFG, 0x616000);
10629         }
10630
10631         /* Prevent chip from dropping frames when flow control
10632          * is enabled.
10633          */
10634         if (tg3_flag(tp, 57765_CLASS))
10635                 val = 1;
10636         else
10637                 val = 2;
10638         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10639
10640         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10641             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10642                 /* Use hardware link auto-negotiation */
10643                 tg3_flag_set(tp, HW_AUTONEG);
10644         }
10645
10646         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10647             tg3_asic_rev(tp) == ASIC_REV_5714) {
10648                 u32 tmp;
10649
10650                 tmp = tr32(SERDES_RX_CTRL);
10651                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10652                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10653                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10654                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10655         }
10656
10657         if (!tg3_flag(tp, USE_PHYLIB)) {
10658                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10659                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10660
10661                 err = tg3_setup_phy(tp, false);
10662                 if (err)
10663                         return err;
10664
10665                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10666                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10667                         u32 tmp;
10668
10669                         /* Clear CRC stats. */
10670                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10671                                 tg3_writephy(tp, MII_TG3_TEST1,
10672                                              tmp | MII_TG3_TEST1_CRC_EN);
10673                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10674                         }
10675                 }
10676         }
10677
10678         __tg3_set_rx_mode(tp->dev);
10679
10680         /* Initialize receive rules. */
10681         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10682         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10683         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10684         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10685
10686         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10687                 limit = 8;
10688         else
10689                 limit = 16;
10690         if (tg3_flag(tp, ENABLE_ASF))
10691                 limit -= 4;
              /* Each case below intentionally falls through, so every
               * rule/value pair from limit - 1 down to 4 is cleared
               * (rules 3 and 2 are deliberately left untouched).
               */
10692         switch (limit) {
10693         case 16:
10694                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10695         case 15:
10696                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10697         case 14:
10698                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10699         case 13:
10700                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10701         case 12:
10702                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10703         case 11:
10704                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10705         case 10:
10706                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10707         case 9:
10708                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10709         case 8:
10710                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10711         case 7:
10712                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10713         case 6:
10714                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10715         case 5:
10716                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10717         case 4:
10718                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10719         case 3:
10720                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10721         case 2:
10722         case 1:
10723
10724         default:
10725                 break;
10726         }
10727
10728         if (tg3_flag(tp, ENABLE_APE))
10729                 /* Write our heartbeat update interval (here: disabled) to APE. */
10730                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10731                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10732
10733         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10734
10735         return 0;
10736 }
10737
10738 /* Called at device open time to get the chip ready for
10739  * packet processing.  Invoked with tp->lock held.
10740  */
10741 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10742 {
10743         /* Chip may have been just powered on. If so, the boot code may still
10744          * be running initialization. Wait for it to finish to avoid races in
10745          * accessing the hardware.
10746          */
10747         tg3_enable_register_access(tp);
10748         tg3_poll_fw(tp);
10749
10750         tg3_switch_clocks(tp);
10751
10752         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10753
10754         return tg3_reset_hw(tp, reset_phy);
10755 }
10756
10757 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10758 {
10759         int i;
10760
10761         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10762                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10763
10764                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10765                 off += len;     /* redundant: off is recomputed each pass */
10766
10767                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10768                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10769                         memset(ocir, 0, TG3_OCIR_LEN);
10770         }
10771 }
10772
10773 /* sysfs attributes for hwmon */
10774 static ssize_t tg3_show_temp(struct device *dev,
10775                              struct device_attribute *devattr, char *buf)
10776 {
10777         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10778         struct tg3 *tp = dev_get_drvdata(dev);
10779         u32 temperature;
10780
10781         spin_lock_bh(&tp->lock);
10782         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10783                                 sizeof(temperature));
10784         spin_unlock_bh(&tp->lock);
              /* hwmon sysfs expects temperatures in millidegrees Celsius */
10785         return sprintf(buf, "%u\n", temperature * 1000);
10786 }
10787
10788
10789 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10790                           TG3_TEMP_SENSOR_OFFSET);
10791 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10792                           TG3_TEMP_CAUTION_OFFSET);
10793 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10794                           TG3_TEMP_MAX_OFFSET);
10795
10796 static struct attribute *tg3_attrs[] = {
10797         &sensor_dev_attr_temp1_input.dev_attr.attr,
10798         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10799         &sensor_dev_attr_temp1_max.dev_attr.attr,
10800         NULL
10801 };
10802 ATTRIBUTE_GROUPS(tg3);
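/* Editor's note: ATTRIBUTE_GROUPS(tg3) roughly expands to
 *
 *      static const struct attribute_group tg3_group = {
 *              .attrs = tg3_attrs,
 *      };
 *      static const struct attribute_group *tg3_groups[] = {
 *              &tg3_group,
 *              NULL,
 *      };
 *
 * providing the tg3_groups array passed to
 * hwmon_device_register_with_groups() in tg3_hwmon_open() below.
 */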
10803
10804 static void tg3_hwmon_close(struct tg3 *tp)
10805 {
10806         if (tp->hwmon_dev) {
10807                 hwmon_device_unregister(tp->hwmon_dev);
10808                 tp->hwmon_dev = NULL;
10809         }
10810 }
10811
10812 static void tg3_hwmon_open(struct tg3 *tp)
10813 {
10814         int i;
10815         u32 size = 0;
10816         struct pci_dev *pdev = tp->pdev;
10817         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10818
10819         tg3_sd_scan_scratchpad(tp, ocirs);
10820
10821         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10822                 if (!ocirs[i].src_data_length)
10823                         continue;
10824
10825                 size += ocirs[i].src_hdr_length;
10826                 size += ocirs[i].src_data_length;
10827         }
10828
10829         if (!size)
10830                 return;
10831
10832         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10833                                                           tp, tg3_groups);
10834         if (IS_ERR(tp->hwmon_dev)) {
10835                 tp->hwmon_dev = NULL;
10836                 dev_err(&pdev->dev, "Cannot register hwmon device\n");
10837         }
10838 }
10839
10840
10841 #define TG3_STAT_ADD32(PSTAT, REG) \
10842 do {    u32 __val = tr32(REG); \
10843         (PSTAT)->low += __val; \
10844         if ((PSTAT)->low < __val) \
10845                 (PSTAT)->high += 1; \
10846 } while (0)
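/* Editor's worked example of the carry detection above: if (PSTAT)->low is
 * 0xffffff80 and __val is 0x100, the 32-bit addition wraps low to 0x80;
 * since 0x80 < __val the wrap is detected and high is incremented, giving a
 * monotonic 64-bit sum built from 32-bit register reads.
 */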
10847
10848 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10849 {
10850         struct tg3_hw_stats *sp = tp->hw_stats;
10851
10852         if (!tp->link_up)
10853                 return;
10854
10855         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10856         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10857         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10858         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10859         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10860         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10861         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10862         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10863         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10864         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10865         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10866         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10867         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10868         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10869                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10870                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10871                 u32 val;
10872
10873                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10874                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10875                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10876                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10877         }
10878
10879         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10880         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10881         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10882         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10883         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10884         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10885         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10886         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10887         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10888         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10889         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10890         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10891         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10892         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10893
10894         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10895         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10896             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10897             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10898             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10899                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10900         } else {
10901                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10902                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10903                 if (val) {
10904                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10905                         sp->rx_discards.low += val;
10906                         if (sp->rx_discards.low < val)
10907                                 sp->rx_discards.high += 1;
10908                 }
10909                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10910         }
10911         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10912 }
10913
10914 static void tg3_chk_missed_msi(struct tg3 *tp)
10915 {
10916         u32 i;
10917
10918         for (i = 0; i < tp->irq_cnt; i++) {
10919                 struct tg3_napi *tnapi = &tp->napi[i];
10920
10921                 if (tg3_has_work(tnapi)) {
10922                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10923                             tnapi->last_tx_cons == tnapi->tx_cons) {
10924                                 if (tnapi->chk_msi_cnt < 1) {
10925                                         tnapi->chk_msi_cnt++;
10926                                         return;
10927                                 }
10928                                 tg3_msi(0, tnapi);
10929                         }
10930                 }
10931                 tnapi->chk_msi_cnt = 0;
10932                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10933                 tnapi->last_tx_cons = tnapi->tx_cons;
10934         }
10935 }
10936
10937 static void tg3_timer(unsigned long __opaque)
10938 {
10939         struct tg3 *tp = (struct tg3 *) __opaque;
10940
10941         spin_lock(&tp->lock);
10942
10943         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10944                 spin_unlock(&tp->lock);
10945                 goto restart_timer;
10946         }
10947
10948         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10949             tg3_flag(tp, 57765_CLASS))
10950                 tg3_chk_missed_msi(tp);
10951
10952         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10953                 /* BCM4785: Flush posted writes from GbE to host memory. */
10954                 tr32(HOSTCC_MODE);
10955         }
10956
10957         if (!tg3_flag(tp, TAGGED_STATUS)) {
10958                 /* All of this garbage is needed because, when using
10959                  * non-tagged IRQ status, the mailbox/status_block
10960                  * protocol the chip uses with the CPU is race prone.
10961                  */
10962                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10963                         tw32(GRC_LOCAL_CTRL,
10964                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10965                 } else {
10966                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10967                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10968                 }
10969
10970                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10971                         spin_unlock(&tp->lock);
10972                         tg3_reset_task_schedule(tp);
10973                         goto restart_timer;
10974                 }
10975         }
10976
10977         /* This part only runs once per second. */
10978         if (!--tp->timer_counter) {
10979                 if (tg3_flag(tp, 5705_PLUS))
10980                         tg3_periodic_fetch_stats(tp);
10981
10982                 if (tp->setlpicnt && !--tp->setlpicnt)
10983                         tg3_phy_eee_enable(tp);
10984
10985                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10986                         u32 mac_stat;
10987                         int phy_event;
10988
10989                         mac_stat = tr32(MAC_STATUS);
10990
10991                         phy_event = 0;
10992                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10993                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10994                                         phy_event = 1;
10995                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10996                                 phy_event = 1;
10997
10998                         if (phy_event)
10999                                 tg3_setup_phy(tp, false);
11000                 } else if (tg3_flag(tp, POLL_SERDES)) {
11001                         u32 mac_stat = tr32(MAC_STATUS);
11002                         int need_setup = 0;
11003
11004                         if (tp->link_up &&
11005                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11006                                 need_setup = 1;
11007                         }
11008                         if (!tp->link_up &&
11009                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11010                                          MAC_STATUS_SIGNAL_DET))) {
11011                                 need_setup = 1;
11012                         }
11013                         if (need_setup) {
11014                                 if (!tp->serdes_counter) {
11015                                         tw32_f(MAC_MODE,
11016                                              (tp->mac_mode &
11017                                               ~MAC_MODE_PORT_MODE_MASK));
11018                                         udelay(40);
11019                                         tw32_f(MAC_MODE, tp->mac_mode);
11020                                         udelay(40);
11021                                 }
11022                                 tg3_setup_phy(tp, false);
11023                         }
11024                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11025                            tg3_flag(tp, 5780_CLASS)) {
11026                         tg3_serdes_parallel_detect(tp);
11027                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11028                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11029                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11030                                          TG3_CPMU_STATUS_LINK_MASK);
11031
11032                         if (link_up != tp->link_up)
11033                                 tg3_setup_phy(tp, false);
11034                 }
11035
11036                 tp->timer_counter = tp->timer_multiplier;
11037         }
11038
11039         /* Heartbeat is only sent once every 2 seconds.
11040          *
11041          * The heartbeat is to tell the ASF firmware that the host
11042          * driver is still alive.  In the event that the OS crashes,
11043          * ASF needs to reset the hardware to free up the FIFO space
11044          * that may be filled with rx packets destined for the host.
11045          * If the FIFO is full, ASF will no longer function properly.
11046          *
11047          * Unintended resets have been reported on real time kernels
11048          * where the timer doesn't run on time.  Netpoll will have the
11049          * same problem.
11050          *
11051          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11052          * to check the ring condition when the heartbeat is expiring
11053          * before doing the reset.  This will prevent most unintended
11054          * resets.
11055          */
11056         if (!--tp->asf_counter) {
11057                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11058                         tg3_wait_for_event_ack(tp);
11059
11060                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11061                                       FWCMD_NICDRV_ALIVE3);
11062                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11063                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11064                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11065
11066                         tg3_generate_fw_event(tp);
11067                 }
11068                 tp->asf_counter = tp->asf_multiplier;
11069         }
11070
11071         spin_unlock(&tp->lock);
11072
11073 restart_timer:
11074         tp->timer.expires = jiffies + tp->timer_offset;
11075         add_timer(&tp->timer);
11076 }
11077
11078 static void tg3_timer_init(struct tg3 *tp)
11079 {
11080         if (tg3_flag(tp, TAGGED_STATUS) &&
11081             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11082             !tg3_flag(tp, 57765_CLASS))
11083                 tp->timer_offset = HZ;
11084         else
11085                 tp->timer_offset = HZ / 10;
11086
11087         BUG_ON(tp->timer_offset > HZ);
11088
11089         tp->timer_multiplier = (HZ / tp->timer_offset);
11090         tp->asf_multiplier = (HZ / tp->timer_offset) *
11091                              TG3_FW_UPDATE_FREQ_SEC;
11092
11093         init_timer(&tp->timer);
11094         tp->timer.data = (unsigned long) tp;
11095         tp->timer.function = tg3_timer;
11096 }
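/* Editor's example, assuming HZ == 1000: with tagged status on an eligible
 * chip, timer_offset = HZ, so tg3_timer() runs once per second and
 * timer_multiplier = 1.  Otherwise timer_offset = HZ / 10, the timer runs
 * every 100 ms, and timer_multiplier = 10, so the once-per-second work in
 * tg3_timer() still executes on every tenth invocation.
 */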
11097
11098 static void tg3_timer_start(struct tg3 *tp)
11099 {
11100         tp->asf_counter   = tp->asf_multiplier;
11101         tp->timer_counter = tp->timer_multiplier;
11102
11103         tp->timer.expires = jiffies + tp->timer_offset;
11104         add_timer(&tp->timer);
11105 }
11106
11107 static void tg3_timer_stop(struct tg3 *tp)
11108 {
11109         del_timer_sync(&tp->timer);
11110 }
11111
11112 /* Restart hardware after configuration changes, self-test, etc.
11113  * Invoked with tp->lock held.
11114  */
11115 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11116         __releases(tp->lock)
11117         __acquires(tp->lock)
11118 {
11119         int err;
11120
11121         err = tg3_init_hw(tp, reset_phy);
11122         if (err) {
11123                 netdev_err(tp->dev,
11124                            "Failed to re-initialize device, aborting\n");
11125                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11126                 tg3_full_unlock(tp);
11127                 tg3_timer_stop(tp);
11128                 tp->irq_sync = 0;
11129                 tg3_napi_enable(tp);
11130                 dev_close(tp->dev);
11131                 tg3_full_lock(tp, 0);
11132         }
11133         return err;
11134 }
11135
11136 static void tg3_reset_task(struct work_struct *work)
11137 {
11138         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11139         int err;
11140
11141         rtnl_lock();
11142         tg3_full_lock(tp, 0);
11143
11144         if (!netif_running(tp->dev)) {
11145                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11146                 tg3_full_unlock(tp);
11147                 rtnl_unlock();
11148                 return;
11149         }
11150
11151         tg3_full_unlock(tp);
11152
11153         tg3_phy_stop(tp);
11154
11155         tg3_netif_stop(tp);
11156
11157         tg3_full_lock(tp, 1);
11158
11159         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11160                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11161                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11162                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11163                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11164         }
11165
11166         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11167         err = tg3_init_hw(tp, true);
11168         if (err) {
11169                 tg3_full_unlock(tp);
11170                 tp->irq_sync = 0;
11171                 tg3_napi_enable(tp);
11172                 /* Clear this flag so that tg3_reset_task_cancel() will not
11173                  * call cancel_work_sync() and wait forever.
11174                  */
11175                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11176                 dev_close(tp->dev);
11177                 goto out;
11178         }
11179
11180         tg3_netif_start(tp);
11181
11182         tg3_full_unlock(tp);
11183
11184         if (!err)
11185                 tg3_phy_start(tp);
11186
11187         tg3_flag_clear(tp, RESET_TASK_PENDING);
11188 out:
11189         rtnl_unlock();
11190 }
11191
11192 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11193 {
11194         irq_handler_t fn;
11195         unsigned long flags;
11196         char *name;
11197         struct tg3_napi *tnapi = &tp->napi[irq_num];
11198
11199         if (tp->irq_cnt == 1)
11200                 name = tp->dev->name;
11201         else {
11202                 name = &tnapi->irq_lbl[0];
11203                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11204                         snprintf(name, IFNAMSIZ,
11205                                  "%s-txrx-%d", tp->dev->name, irq_num);
11206                 else if (tnapi->tx_buffers)
11207                         snprintf(name, IFNAMSIZ,
11208                                  "%s-tx-%d", tp->dev->name, irq_num);
11209                 else if (tnapi->rx_rcb)
11210                         snprintf(name, IFNAMSIZ,
11211                                  "%s-rx-%d", tp->dev->name, irq_num);
11212                 else
11213                         snprintf(name, IFNAMSIZ,
11214                                  "%s-%d", tp->dev->name, irq_num);
11215                 name[IFNAMSIZ-1] = 0;
11216         }
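              /* Editor's example (hypothetical "eth0" with multiple vectors):
               * a vector servicing both rings is named "eth0-txrx-1", a
               * tx-only vector "eth0-tx-1", an rx-only vector "eth0-rx-1",
               * and a vector with neither ring simply "eth0-1".
               */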
11217
11218         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11219                 fn = tg3_msi;
11220                 if (tg3_flag(tp, 1SHOT_MSI))
11221                         fn = tg3_msi_1shot;
11222                 flags = 0;
11223         } else {
11224                 fn = tg3_interrupt;
11225                 if (tg3_flag(tp, TAGGED_STATUS))
11226                         fn = tg3_interrupt_tagged;
11227                 flags = IRQF_SHARED;
11228         }
11229
11230         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11231 }
11232
11233 static int tg3_test_interrupt(struct tg3 *tp)
11234 {
11235         struct tg3_napi *tnapi = &tp->napi[0];
11236         struct net_device *dev = tp->dev;
11237         int err, i, intr_ok = 0;
11238         u32 val;
11239
11240         if (!netif_running(dev))
11241                 return -ENODEV;
11242
11243         tg3_disable_ints(tp);
11244
11245         free_irq(tnapi->irq_vec, tnapi);
11246
11247         /*
11248          * Turn off MSI one shot mode.  Otherwise this test has no
11249          * way to observe whether the interrupt was delivered.
11250          */
11251         if (tg3_flag(tp, 57765_PLUS)) {
11252                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11253                 tw32(MSGINT_MODE, val);
11254         }
11255
11256         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11257                           IRQF_SHARED, dev->name, tnapi);
11258         if (err)
11259                 return err;
11260
11261         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11262         tg3_enable_ints(tp);
11263
11264         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11265                tnapi->coal_now);
11266
11267         for (i = 0; i < 5; i++) {
11268                 u32 int_mbox, misc_host_ctrl;
11269
11270                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11271                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11272
11273                 if ((int_mbox != 0) ||
11274                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11275                         intr_ok = 1;
11276                         break;
11277                 }
11278
11279                 if (tg3_flag(tp, 57765_PLUS) &&
11280                     tnapi->hw_status->status_tag != tnapi->last_tag)
11281                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11282
11283                 msleep(10);
11284         }
11285
11286         tg3_disable_ints(tp);
11287
11288         free_irq(tnapi->irq_vec, tnapi);
11289
11290         err = tg3_request_irq(tp, 0);
11291
11292         if (err)
11293                 return err;
11294
11295         if (intr_ok) {
11296                 /* Reenable MSI one shot mode. */
11297                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11298                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11299                         tw32(MSGINT_MODE, val);
11300                 }
11301                 return 0;
11302         }
11303
11304         return -EIO;
11305 }
11306
11307 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
11308  * mode is successfully restored.
11309  */
11310 static int tg3_test_msi(struct tg3 *tp)
11311 {
11312         int err;
11313         u16 pci_cmd;
11314
11315         if (!tg3_flag(tp, USING_MSI))
11316                 return 0;
11317
11318         /* Turn off SERR reporting in case MSI terminates with Master
11319          * Abort.
11320          */
11321         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11322         pci_write_config_word(tp->pdev, PCI_COMMAND,
11323                               pci_cmd & ~PCI_COMMAND_SERR);
11324
11325         err = tg3_test_interrupt(tp);
11326
11327         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11328
11329         if (!err)
11330                 return 0;
11331
11332         /* other failures */
11333         if (err != -EIO)
11334                 return err;
11335
11336         /* MSI test failed, go back to INTx mode */
11337         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11338                     "to INTx mode. Please report this failure to the PCI "
11339                     "maintainer and include system chipset information\n");
11340
11341         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11342
11343         pci_disable_msi(tp->pdev);
11344
11345         tg3_flag_clear(tp, USING_MSI);
11346         tp->napi[0].irq_vec = tp->pdev->irq;
11347
11348         err = tg3_request_irq(tp, 0);
11349         if (err)
11350                 return err;
11351
11352         /* Need to reset the chip because the MSI cycle may have terminated
11353          * with Master Abort.
11354          */
11355         tg3_full_lock(tp, 1);
11356
11357         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11358         err = tg3_init_hw(tp, true);
11359
11360         tg3_full_unlock(tp);
11361
11362         if (err)
11363                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11364
11365         return err;
11366 }
11367
11368 static int tg3_request_firmware(struct tg3 *tp)
11369 {
11370         const struct tg3_firmware_hdr *fw_hdr;
11371
11372         if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11373                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11374                            tp->fw_needed);
11375                 return -ENOENT;
11376         }
11377
11378         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11379
11380         /* Firmware blob starts with version numbers, followed by
11381          * start address and _full_ length including BSS sections
11382          * (which must be longer than the actual data, of course).
11383          */
11384
11385         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11386         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11387                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11388                            tp->fw_len, tp->fw_needed);
11389                 release_firmware(tp->fw);
11390                 tp->fw = NULL;
11391                 return -EINVAL;
11392         }
11393
11394         /* We no longer need firmware; we have it. */
11395         tp->fw_needed = NULL;
11396         return 0;
11397 }
11398
11399 static u32 tg3_irq_count(struct tg3 *tp)
11400 {
11401         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11402
11403         if (irq_cnt > 1) {
11404                 /* We want as many rx rings enabled as there are cpus.
11405                  * In multiqueue MSI-X mode, the first MSI-X vector
11406          * only deals with link interrupts, etc., so we add
11407                  * one to the number of vectors we are requesting.
11408                  */
11409                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11410         }
11411
11412         return irq_cnt;
11413 }
11414
11415 static bool tg3_enable_msix(struct tg3 *tp)
11416 {
11417         int i, rc;
11418         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11419
11420         tp->txq_cnt = tp->txq_req;
11421         tp->rxq_cnt = tp->rxq_req;
11422         if (!tp->rxq_cnt)
11423                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11424         if (tp->rxq_cnt > tp->rxq_max)
11425                 tp->rxq_cnt = tp->rxq_max;
11426
11427         /* Disable multiple TX rings by default.  Simple round-robin hardware
11428          * scheduling of the TX rings can cause starvation of rings with
11429          * small packets when other rings have TSO or jumbo packets.
11430          */
11431         if (!tp->txq_req)
11432                 tp->txq_cnt = 1;
11433
11434         tp->irq_cnt = tg3_irq_count(tp);
11435
11436         for (i = 0; i < tp->irq_max; i++) {
11437                 msix_ent[i].entry  = i;
11438                 msix_ent[i].vector = 0;
11439         }
11440
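        /* pci_enable_msix_range() returns the number of vectors actually
         * granted (anywhere from 1 up to tp->irq_cnt) or a negative errno;
         * a shortfall is handled below by shrinking the queue counts.
         */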
11441         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11442         if (rc < 0) {
11443                 return false;
11444         } else if (rc < tp->irq_cnt) {
11445                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11446                               tp->irq_cnt, rc);
11447                 tp->irq_cnt = rc;
11448                 tp->rxq_cnt = max(rc - 1, 1);
11449                 if (tp->txq_cnt)
11450                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11451         }
11452
11453         for (i = 0; i < tp->irq_max; i++)
11454                 tp->napi[i].irq_vec = msix_ent[i].vector;
11455
11456         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11457                 pci_disable_msix(tp->pdev);
11458                 return false;
11459         }
11460
11461         if (tp->irq_cnt == 1)
11462                 return true;
11463
11464         tg3_flag_set(tp, ENABLE_RSS);
11465
11466         if (tp->txq_cnt > 1)
11467                 tg3_flag_set(tp, ENABLE_TSS);
11468
11469         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11470
11471         return true;
11472 }
11473
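/* Choose the interrupt mode: try multi-vector MSI-X first, then MSI,
 * and fall back to legacy INTx with a single queue if neither is
 * usable (or if tagged status is unsupported).
 */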
11474 static void tg3_ints_init(struct tg3 *tp)
11475 {
11476         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11477             !tg3_flag(tp, TAGGED_STATUS)) {
11478                 /* All MSI supporting chips should support tagged
11479                  * status.  Warn and fall back to INTx if not.
11480                  */
11481                 netdev_warn(tp->dev,
11482                             "MSI without TAGGED_STATUS? Not using MSI\n");
11483                 goto defcfg;
11484         }
11485
11486         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11487                 tg3_flag_set(tp, USING_MSIX);
11488         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11489                 tg3_flag_set(tp, USING_MSI);
11490
11491         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11492                 u32 msi_mode = tr32(MSGINT_MODE);
11493                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11494                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11495                 if (!tg3_flag(tp, 1SHOT_MSI))
11496                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11497                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11498         }
11499 defcfg:
11500         if (!tg3_flag(tp, USING_MSIX)) {
11501                 tp->irq_cnt = 1;
11502                 tp->napi[0].irq_vec = tp->pdev->irq;
11503         }
11504
11505         if (tp->irq_cnt == 1) {
11506                 tp->txq_cnt = 1;
11507                 tp->rxq_cnt = 1;
11508                 netif_set_real_num_tx_queues(tp->dev, 1);
11509                 netif_set_real_num_rx_queues(tp->dev, 1);
11510         }
11511 }
11512
11513 static void tg3_ints_fini(struct tg3 *tp)
11514 {
11515         if (tg3_flag(tp, USING_MSIX))
11516                 pci_disable_msix(tp->pdev);
11517         else if (tg3_flag(tp, USING_MSI))
11518                 pci_disable_msi(tp->pdev);
11519         tg3_flag_clear(tp, USING_MSI);
11520         tg3_flag_clear(tp, USING_MSIX);
11521         tg3_flag_clear(tp, ENABLE_RSS);
11522         tg3_flag_clear(tp, ENABLE_TSS);
11523 }
11524
11525 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11526                      bool init)
11527 {
11528         struct net_device *dev = tp->dev;
11529         int i, err;
11530
11531         /*
11532          * Set up interrupts first so we know how
11533          * many NAPI resources to allocate
11534          */
11535         tg3_ints_init(tp);
11536
11537         tg3_rss_check_indir_tbl(tp);
11538
11539         /* The placement of this call is tied
11540          * to the setup and use of Host TX descriptors.
11541          */
11542         err = tg3_alloc_consistent(tp);
11543         if (err)
11544                 goto out_ints_fini;
11545
11546         tg3_napi_init(tp);
11547
11548         tg3_napi_enable(tp);
11549
11550         for (i = 0; i < tp->irq_cnt; i++) {
11551                 struct tg3_napi *tnapi = &tp->napi[i];
11552                 err = tg3_request_irq(tp, i);
11553                 if (err) {
11554                         for (i--; i >= 0; i--) {
11555                                 tnapi = &tp->napi[i];
11556                                 free_irq(tnapi->irq_vec, tnapi);
11557                         }
11558                         goto out_napi_fini;
11559                 }
11560         }
11561
11562         tg3_full_lock(tp, 0);
11563
11564         if (init)
11565                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11566
11567         err = tg3_init_hw(tp, reset_phy);
11568         if (err) {
11569                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11570                 tg3_free_rings(tp);
11571         }
11572
11573         tg3_full_unlock(tp);
11574
11575         if (err)
11576                 goto out_free_irq;
11577
11578         if (test_irq && tg3_flag(tp, USING_MSI)) {
11579                 err = tg3_test_msi(tp);
11580
11581                 if (err) {
11582                         tg3_full_lock(tp, 0);
11583                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11584                         tg3_free_rings(tp);
11585                         tg3_full_unlock(tp);
11586
11587                         goto out_napi_fini;
11588                 }
11589
11590                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11591                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11592
11593                         tw32(PCIE_TRANSACTION_CFG,
11594                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11595                 }
11596         }
11597
11598         tg3_phy_start(tp);
11599
11600         tg3_hwmon_open(tp);
11601
11602         tg3_full_lock(tp, 0);
11603
11604         tg3_timer_start(tp);
11605         tg3_flag_set(tp, INIT_COMPLETE);
11606         tg3_enable_ints(tp);
11607
11608         tg3_ptp_resume(tp);
11609
11610         tg3_full_unlock(tp);
11611
11612         netif_tx_start_all_queues(dev);
11613
11614         /*
11615          * If the loopback feature was turned on while the device was down,
11616          * make sure that it's reinstated properly now.
11617          */
11618         if (dev->features & NETIF_F_LOOPBACK)
11619                 tg3_set_loopback(dev, dev->features);
11620
11621         return 0;
11622
11623 out_free_irq:
11624         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11625                 struct tg3_napi *tnapi = &tp->napi[i];
11626                 free_irq(tnapi->irq_vec, tnapi);
11627         }
11628
11629 out_napi_fini:
11630         tg3_napi_disable(tp);
11631         tg3_napi_fini(tp);
11632         tg3_free_consistent(tp);
11633
11634 out_ints_fini:
11635         tg3_ints_fini(tp);
11636
11637         return err;
11638 }
11639
11640 static void tg3_stop(struct tg3 *tp)
11641 {
11642         int i;
11643
11644         tg3_reset_task_cancel(tp);
11645         tg3_netif_stop(tp);
11646
11647         tg3_timer_stop(tp);
11648
11649         tg3_hwmon_close(tp);
11650
11651         tg3_phy_stop(tp);
11652
11653         tg3_full_lock(tp, 1);
11654
11655         tg3_disable_ints(tp);
11656
11657         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11658         tg3_free_rings(tp);
11659         tg3_flag_clear(tp, INIT_COMPLETE);
11660
11661         tg3_full_unlock(tp);
11662
11663         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11664                 struct tg3_napi *tnapi = &tp->napi[i];
11665                 free_irq(tnapi->irq_vec, tnapi);
11666         }
11667
11668         tg3_ints_fini(tp);
11669
11670         tg3_napi_fini(tp);
11671
11672         tg3_free_consistent(tp);
11673 }
11674
11675 static int tg3_open(struct net_device *dev)
11676 {
11677         struct tg3 *tp = netdev_priv(dev);
11678         int err;
11679
11680         if (tp->pcierr_recovery) {
11681                 netdev_err(dev, "Failed to open device. PCI error recovery "
11682                            "in progress\n");
11683                 return -EAGAIN;
11684         }
11685
11686         if (tp->fw_needed) {
11687                 err = tg3_request_firmware(tp);
11688                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11689                         if (err) {
11690                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11691                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11692                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11693                                 netdev_warn(tp->dev, "EEE capability restored\n");
11694                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11695                         }
11696                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11697                         if (err)
11698                                 return err;
11699                 } else if (err) {
11700                         netdev_warn(tp->dev, "TSO capability disabled\n");
11701                         tg3_flag_clear(tp, TSO_CAPABLE);
11702                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11703                         netdev_notice(tp->dev, "TSO capability restored\n");
11704                         tg3_flag_set(tp, TSO_CAPABLE);
11705                 }
11706         }
11707
11708         tg3_carrier_off(tp);
11709
11710         err = tg3_power_up(tp);
11711         if (err)
11712                 return err;
11713
11714         tg3_full_lock(tp, 0);
11715
11716         tg3_disable_ints(tp);
11717         tg3_flag_clear(tp, INIT_COMPLETE);
11718
11719         tg3_full_unlock(tp);
11720
11721         err = tg3_start(tp,
11722                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11723                         true, true);
11724         if (err) {
11725                 tg3_frob_aux_power(tp, false);
11726                 pci_set_power_state(tp->pdev, PCI_D3hot);
11727         }
11728
11729         return err;
11730 }
11731
11732 static int tg3_close(struct net_device *dev)
11733 {
11734         struct tg3 *tp = netdev_priv(dev);
11735
11736         if (tp->pcierr_recovery) {
11737                 netdev_err(dev, "Failed to close device. PCI error recovery "
11738                            "in progress\n");
11739                 return -EAGAIN;
11740         }
11741
11742         tg3_stop(tp);
11743
11744         /* Clear stats across close / open calls */
11745         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11746         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11747
11748         if (pci_device_is_present(tp->pdev)) {
11749                 tg3_power_down_prepare(tp);
11750
11751                 tg3_carrier_off(tp);
11752         }
11753         return 0;
11754 }
11755
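/* Hardware statistics are maintained as two 32-bit halves; fold them
 * into a single u64.
 */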
11756 static inline u64 get_stat64(tg3_stat64_t *val)
11757 {
11758         return ((u64)val->high << 32) | ((u64)val->low);
11759 }
11760
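/* On 5700/5701 copper devices the PHY's CRC error counter is used
 * instead of the MAC's FCS counter.  The PHY register appears to be
 * clear-on-read, hence the running phy_crc_errors accumulator.
 */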
11761 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11762 {
11763         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11764
11765         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11766             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11767              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11768                 u32 val;
11769
11770                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11771                         tg3_writephy(tp, MII_TG3_TEST1,
11772                                      val | MII_TG3_TEST1_CRC_EN);
11773                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11774                 } else
11775                         val = 0;
11776
11777                 tp->phy_crc_errors += val;
11778
11779                 return tp->phy_crc_errors;
11780         }
11781
11782         return get_stat64(&hw_stats->rx_fcs_errors);
11783 }
11784
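/* Each ethtool statistic is the total saved at the last close
 * (estats_prev) plus the live hardware counter, so values persist
 * across close/open cycles and chip resets.
 */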
11785 #define ESTAT_ADD(member) \
11786         estats->member =        old_estats->member + \
11787                                 get_stat64(&hw_stats->member)
11788
11789 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11790 {
11791         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11792         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11793
11794         ESTAT_ADD(rx_octets);
11795         ESTAT_ADD(rx_fragments);
11796         ESTAT_ADD(rx_ucast_packets);
11797         ESTAT_ADD(rx_mcast_packets);
11798         ESTAT_ADD(rx_bcast_packets);
11799         ESTAT_ADD(rx_fcs_errors);
11800         ESTAT_ADD(rx_align_errors);
11801         ESTAT_ADD(rx_xon_pause_rcvd);
11802         ESTAT_ADD(rx_xoff_pause_rcvd);
11803         ESTAT_ADD(rx_mac_ctrl_rcvd);
11804         ESTAT_ADD(rx_xoff_entered);
11805         ESTAT_ADD(rx_frame_too_long_errors);
11806         ESTAT_ADD(rx_jabbers);
11807         ESTAT_ADD(rx_undersize_packets);
11808         ESTAT_ADD(rx_in_length_errors);
11809         ESTAT_ADD(rx_out_length_errors);
11810         ESTAT_ADD(rx_64_or_less_octet_packets);
11811         ESTAT_ADD(rx_65_to_127_octet_packets);
11812         ESTAT_ADD(rx_128_to_255_octet_packets);
11813         ESTAT_ADD(rx_256_to_511_octet_packets);
11814         ESTAT_ADD(rx_512_to_1023_octet_packets);
11815         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11816         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11817         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11818         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11819         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11820
11821         ESTAT_ADD(tx_octets);
11822         ESTAT_ADD(tx_collisions);
11823         ESTAT_ADD(tx_xon_sent);
11824         ESTAT_ADD(tx_xoff_sent);
11825         ESTAT_ADD(tx_flow_control);
11826         ESTAT_ADD(tx_mac_errors);
11827         ESTAT_ADD(tx_single_collisions);
11828         ESTAT_ADD(tx_mult_collisions);
11829         ESTAT_ADD(tx_deferred);
11830         ESTAT_ADD(tx_excessive_collisions);
11831         ESTAT_ADD(tx_late_collisions);
11832         ESTAT_ADD(tx_collide_2times);
11833         ESTAT_ADD(tx_collide_3times);
11834         ESTAT_ADD(tx_collide_4times);
11835         ESTAT_ADD(tx_collide_5times);
11836         ESTAT_ADD(tx_collide_6times);
11837         ESTAT_ADD(tx_collide_7times);
11838         ESTAT_ADD(tx_collide_8times);
11839         ESTAT_ADD(tx_collide_9times);
11840         ESTAT_ADD(tx_collide_10times);
11841         ESTAT_ADD(tx_collide_11times);
11842         ESTAT_ADD(tx_collide_12times);
11843         ESTAT_ADD(tx_collide_13times);
11844         ESTAT_ADD(tx_collide_14times);
11845         ESTAT_ADD(tx_collide_15times);
11846         ESTAT_ADD(tx_ucast_packets);
11847         ESTAT_ADD(tx_mcast_packets);
11848         ESTAT_ADD(tx_bcast_packets);
11849         ESTAT_ADD(tx_carrier_sense_errors);
11850         ESTAT_ADD(tx_discards);
11851         ESTAT_ADD(tx_errors);
11852
11853         ESTAT_ADD(dma_writeq_full);
11854         ESTAT_ADD(dma_write_prioq_full);
11855         ESTAT_ADD(rxbds_empty);
11856         ESTAT_ADD(rx_discards);
11857         ESTAT_ADD(rx_errors);
11858         ESTAT_ADD(rx_threshold_hit);
11859
11860         ESTAT_ADD(dma_readq_full);
11861         ESTAT_ADD(dma_read_prioq_full);
11862         ESTAT_ADD(tx_comp_queue_full);
11863
11864         ESTAT_ADD(ring_set_send_prod_index);
11865         ESTAT_ADD(ring_status_update);
11866         ESTAT_ADD(nic_irqs);
11867         ESTAT_ADD(nic_avoided_irqs);
11868         ESTAT_ADD(nic_tx_threshold_hit);
11869
11870         ESTAT_ADD(mbuf_lwm_thresh_hit);
11871 }
11872
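/* Fill rtnl_link_stats64 from the hardware counters plus the totals
 * saved across the last close; some fields (e.g. tx_errors) aggregate
 * several hardware counters.
 */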
11873 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11874 {
11875         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11876         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11877
11878         stats->rx_packets = old_stats->rx_packets +
11879                 get_stat64(&hw_stats->rx_ucast_packets) +
11880                 get_stat64(&hw_stats->rx_mcast_packets) +
11881                 get_stat64(&hw_stats->rx_bcast_packets);
11882
11883         stats->tx_packets = old_stats->tx_packets +
11884                 get_stat64(&hw_stats->tx_ucast_packets) +
11885                 get_stat64(&hw_stats->tx_mcast_packets) +
11886                 get_stat64(&hw_stats->tx_bcast_packets);
11887
11888         stats->rx_bytes = old_stats->rx_bytes +
11889                 get_stat64(&hw_stats->rx_octets);
11890         stats->tx_bytes = old_stats->tx_bytes +
11891                 get_stat64(&hw_stats->tx_octets);
11892
11893         stats->rx_errors = old_stats->rx_errors +
11894                 get_stat64(&hw_stats->rx_errors);
11895         stats->tx_errors = old_stats->tx_errors +
11896                 get_stat64(&hw_stats->tx_errors) +
11897                 get_stat64(&hw_stats->tx_mac_errors) +
11898                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11899                 get_stat64(&hw_stats->tx_discards);
11900
11901         stats->multicast = old_stats->multicast +
11902                 get_stat64(&hw_stats->rx_mcast_packets);
11903         stats->collisions = old_stats->collisions +
11904                 get_stat64(&hw_stats->tx_collisions);
11905
11906         stats->rx_length_errors = old_stats->rx_length_errors +
11907                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11908                 get_stat64(&hw_stats->rx_undersize_packets);
11909
11910         stats->rx_frame_errors = old_stats->rx_frame_errors +
11911                 get_stat64(&hw_stats->rx_align_errors);
11912         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11913                 get_stat64(&hw_stats->tx_discards);
11914         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11915                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11916
11917         stats->rx_crc_errors = old_stats->rx_crc_errors +
11918                 tg3_calc_crc_errors(tp);
11919
11920         stats->rx_missed_errors = old_stats->rx_missed_errors +
11921                 get_stat64(&hw_stats->rx_discards);
11922
11923         stats->rx_dropped = tp->rx_dropped;
11924         stats->tx_dropped = tp->tx_dropped;
11925 }
11926
11927 static int tg3_get_regs_len(struct net_device *dev)
11928 {
11929         return TG3_REG_BLK_SIZE;
11930 }
11931
11932 static void tg3_get_regs(struct net_device *dev,
11933                 struct ethtool_regs *regs, void *_p)
11934 {
11935         struct tg3 *tp = netdev_priv(dev);
11936
11937         regs->version = 0;
11938
11939         memset(_p, 0, TG3_REG_BLK_SIZE);
11940
11941         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11942                 return;
11943
11944         tg3_full_lock(tp, 0);
11945
11946         tg3_dump_legacy_regs(tp, (u32 *)_p);
11947
11948         tg3_full_unlock(tp);
11949 }
11950
11951 static int tg3_get_eeprom_len(struct net_device *dev)
11952 {
11953         struct tg3 *tp = netdev_priv(dev);
11954
11955         return tp->nvram_size;
11956 }
11957
11958 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11959 {
11960         struct tg3 *tp = netdev_priv(dev);
11961         int ret, cpmu_restore = 0;
11962         u8  *pd;
11963         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11964         __be32 val;
11965
11966         if (tg3_flag(tp, NO_NVRAM))
11967                 return -EINVAL;
11968
11969         offset = eeprom->offset;
11970         len = eeprom->len;
11971         eeprom->len = 0;
11972
11973         eeprom->magic = TG3_EEPROM_MAGIC;
11974
11975         /* Override clock, link aware and link idle modes */
11976         if (tg3_flag(tp, CPMU_PRESENT)) {
11977                 cpmu_val = tr32(TG3_CPMU_CTRL);
11978                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11979                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11980                         tw32(TG3_CPMU_CTRL, cpmu_val &
11981                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11982                                              CPMU_CTRL_LINK_IDLE_MODE));
11983                         cpmu_restore = 1;
11984                 }
11985         }
11986         tg3_override_clk(tp);
11987
11988         if (offset & 3) {
11989                 /* adjustments to start on required 4 byte boundary */
11990                 b_offset = offset & 3;
11991                 b_count = 4 - b_offset;
11992                 if (b_count > len) {
11993                         /* i.e. offset=1 len=2 */
11994                         b_count = len;
11995                 }
11996                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11997                 if (ret)
11998                         goto eeprom_done;
11999                 memcpy(data, ((char *)&val) + b_offset, b_count);
12000                 len -= b_count;
12001                 offset += b_count;
12002                 eeprom->len += b_count;
12003         }
12004
12005         /* read bytes up to the last 4 byte boundary */
12006         pd = &data[eeprom->len];
12007         for (i = 0; i < (len - (len & 3)); i += 4) {
12008                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12009                 if (ret) {
12010                         if (i)
12011                                 i -= 4;
12012                         eeprom->len += i;
12013                         goto eeprom_done;
12014                 }
12015                 memcpy(pd + i, &val, 4);
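                /* NVRAM reads can be slow; between words, yield the CPU
                 * when needed and bail out early on a pending signal.
                 */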
12016                 if (need_resched()) {
12017                         if (signal_pending(current)) {
12018                                 eeprom->len += i;
12019                                 ret = -EINTR;
12020                                 goto eeprom_done;
12021                         }
12022                         cond_resched();
12023                 }
12024         }
12025         eeprom->len += i;
12026
12027         if (len & 3) {
12028                 /* read last bytes not ending on 4 byte boundary */
12029                 pd = &data[eeprom->len];
12030                 b_count = len & 3;
12031                 b_offset = offset + len - b_count;
12032                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12033                 if (ret)
12034                         goto eeprom_done;
12035                 memcpy(pd, &val, b_count);
12036                 eeprom->len += b_count;
12037         }
12038         ret = 0;
12039
12040 eeprom_done:
12041         /* Restore clock, link aware and link idle modes */
12042         tg3_restore_clk(tp);
12043         if (cpmu_restore)
12044                 tw32(TG3_CPMU_CTRL, cpmu_val);
12045
12046         return ret;
12047 }
12048
12049 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12050 {
12051         struct tg3 *tp = netdev_priv(dev);
12052         int ret;
12053         u32 offset, len, b_offset, odd_len;
12054         u8 *buf;
12055         __be32 start = 0, end;
12056
12057         if (tg3_flag(tp, NO_NVRAM) ||
12058             eeprom->magic != TG3_EEPROM_MAGIC)
12059                 return -EINVAL;
12060
12061         offset = eeprom->offset;
12062         len = eeprom->len;
12063
12064         if ((b_offset = (offset & 3))) {
12065                 /* adjustments to start on required 4 byte boundary */
12066                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12067                 if (ret)
12068                         return ret;
12069                 len += b_offset;
12070                 offset &= ~3;
12071                 if (len < 4)
12072                         len = 4;
12073         }
12074
12075         odd_len = 0;
12076         if (len & 3) {
12077                 /* adjustments to end on required 4 byte boundary */
12078                 odd_len = 1;
12079                 len = (len + 3) & ~3;
12080                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12081                 if (ret)
12082                         return ret;
12083         }
12084
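        /* If the write is unaligned at either end, build a bounce buffer:
         * the NVRAM words read above preserve the untouched edge bytes and
         * the caller's data is copied into the middle.  E.g. offset=5,
         * len=6 becomes an 8-byte write at offset 4.
         */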
12085         buf = data;
12086         if (b_offset || odd_len) {
12087                 buf = kmalloc(len, GFP_KERNEL);
12088                 if (!buf)
12089                         return -ENOMEM;
12090                 if (b_offset)
12091                         memcpy(buf, &start, 4);
12092                 if (odd_len)
12093                         memcpy(buf+len-4, &end, 4);
12094                 memcpy(buf + b_offset, data, eeprom->len);
12095         }
12096
12097         ret = tg3_nvram_write_block(tp, offset, len, buf);
12098
12099         if (buf != data)
12100                 kfree(buf);
12101
12102         return ret;
12103 }
12104
12105 static int tg3_get_link_ksettings(struct net_device *dev,
12106                                   struct ethtool_link_ksettings *cmd)
12107 {
12108         struct tg3 *tp = netdev_priv(dev);
12109         u32 supported, advertising;
12110
12111         if (tg3_flag(tp, USE_PHYLIB)) {
12112                 struct phy_device *phydev;
12113                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12114                         return -EAGAIN;
12115                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12116                 return phy_ethtool_ksettings_get(phydev, cmd);
12117         }
12118
12119         supported = (SUPPORTED_Autoneg);
12120
12121         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12122                 supported |= (SUPPORTED_1000baseT_Half |
12123                               SUPPORTED_1000baseT_Full);
12124
12125         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12126                 supported |= (SUPPORTED_100baseT_Half |
12127                               SUPPORTED_100baseT_Full |
12128                               SUPPORTED_10baseT_Half |
12129                               SUPPORTED_10baseT_Full |
12130                               SUPPORTED_TP);
12131                 cmd->base.port = PORT_TP;
12132         } else {
12133                 supported |= SUPPORTED_FIBRE;
12134                 cmd->base.port = PORT_FIBRE;
12135         }
12136         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12137                                                 supported);
12138
12139         advertising = tp->link_config.advertising;
12140         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12141                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12142                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12143                                 advertising |= ADVERTISED_Pause;
12144                         } else {
12145                                 advertising |= ADVERTISED_Pause |
12146                                         ADVERTISED_Asym_Pause;
12147                         }
12148                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12149                         advertising |= ADVERTISED_Asym_Pause;
12150                 }
12151         }
12152         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12153                                                 advertising);
12154
12155         if (netif_running(dev) && tp->link_up) {
12156                 cmd->base.speed = tp->link_config.active_speed;
12157                 cmd->base.duplex = tp->link_config.active_duplex;
12158                 ethtool_convert_legacy_u32_to_link_mode(
12159                         cmd->link_modes.lp_advertising,
12160                         tp->link_config.rmt_adv);
12161
12162                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12163                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12164                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12165                         else
12166                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12167                 }
12168         } else {
12169                 cmd->base.speed = SPEED_UNKNOWN;
12170                 cmd->base.duplex = DUPLEX_UNKNOWN;
12171                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12172         }
12173         cmd->base.phy_address = tp->phy_addr;
12174         cmd->base.autoneg = tp->link_config.autoneg;
12175         return 0;
12176 }
12177
12178 static int tg3_set_link_ksettings(struct net_device *dev,
12179                                   const struct ethtool_link_ksettings *cmd)
12180 {
12181         struct tg3 *tp = netdev_priv(dev);
12182         u32 speed = cmd->base.speed;
12183         u32 advertising;
12184
12185         if (tg3_flag(tp, USE_PHYLIB)) {
12186                 struct phy_device *phydev;
12187                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12188                         return -EAGAIN;
12189                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12190                 return phy_ethtool_ksettings_set(phydev, cmd);
12191         }
12192
12193         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12194             cmd->base.autoneg != AUTONEG_DISABLE)
12195                 return -EINVAL;
12196
12197         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12198             cmd->base.duplex != DUPLEX_FULL &&
12199             cmd->base.duplex != DUPLEX_HALF)
12200                 return -EINVAL;
12201
12202         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12203                                                 cmd->link_modes.advertising);
12204
12205         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12206                 u32 mask = ADVERTISED_Autoneg |
12207                            ADVERTISED_Pause |
12208                            ADVERTISED_Asym_Pause;
12209
12210                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12211                         mask |= ADVERTISED_1000baseT_Half |
12212                                 ADVERTISED_1000baseT_Full;
12213
12214                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12215                         mask |= ADVERTISED_100baseT_Half |
12216                                 ADVERTISED_100baseT_Full |
12217                                 ADVERTISED_10baseT_Half |
12218                                 ADVERTISED_10baseT_Full |
12219                                 ADVERTISED_TP;
12220                 else
12221                         mask |= ADVERTISED_FIBRE;
12222
12223                 if (advertising & ~mask)
12224                         return -EINVAL;
12225
12226                 mask &= (ADVERTISED_1000baseT_Half |
12227                          ADVERTISED_1000baseT_Full |
12228                          ADVERTISED_100baseT_Half |
12229                          ADVERTISED_100baseT_Full |
12230                          ADVERTISED_10baseT_Half |
12231                          ADVERTISED_10baseT_Full);
12232
12233                 advertising &= mask;
12234         } else {
12235                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12236                         if (speed != SPEED_1000)
12237                                 return -EINVAL;
12238
12239                         if (cmd->base.duplex != DUPLEX_FULL)
12240                                 return -EINVAL;
12241                 } else {
12242                         if (speed != SPEED_100 &&
12243                             speed != SPEED_10)
12244                                 return -EINVAL;
12245                 }
12246         }
12247
12248         tg3_full_lock(tp, 0);
12249
12250         tp->link_config.autoneg = cmd->base.autoneg;
12251         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12252                 tp->link_config.advertising = (advertising |
12253                                               ADVERTISED_Autoneg);
12254                 tp->link_config.speed = SPEED_UNKNOWN;
12255                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12256         } else {
12257                 tp->link_config.advertising = 0;
12258                 tp->link_config.speed = speed;
12259                 tp->link_config.duplex = cmd->base.duplex;
12260         }
12261
12262         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12263
12264         tg3_warn_mgmt_link_flap(tp);
12265
12266         if (netif_running(dev))
12267                 tg3_setup_phy(tp, true);
12268
12269         tg3_full_unlock(tp);
12270
12271         return 0;
12272 }
12273
12274 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12275 {
12276         struct tg3 *tp = netdev_priv(dev);
12277
12278         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12279         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12280         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12281         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12282 }
12283
12284 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12285 {
12286         struct tg3 *tp = netdev_priv(dev);
12287
12288         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12289                 wol->supported = WAKE_MAGIC;
12290         else
12291                 wol->supported = 0;
12292         wol->wolopts = 0;
12293         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12294                 wol->wolopts = WAKE_MAGIC;
12295         memset(&wol->sopass, 0, sizeof(wol->sopass));
12296 }
12297
12298 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12299 {
12300         struct tg3 *tp = netdev_priv(dev);
12301         struct device *dp = &tp->pdev->dev;
12302
12303         if (wol->wolopts & ~WAKE_MAGIC)
12304                 return -EINVAL;
12305         if ((wol->wolopts & WAKE_MAGIC) &&
12306             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12307                 return -EINVAL;
12308
12309         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12310
12311         if (device_may_wakeup(dp))
12312                 tg3_flag_set(tp, WOL_ENABLE);
12313         else
12314                 tg3_flag_clear(tp, WOL_ENABLE);
12315
12316         return 0;
12317 }
12318
12319 static u32 tg3_get_msglevel(struct net_device *dev)
12320 {
12321         struct tg3 *tp = netdev_priv(dev);
12322         return tp->msg_enable;
12323 }
12324
12325 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12326 {
12327         struct tg3 *tp = netdev_priv(dev);
12328         tp->msg_enable = value;
12329 }
12330
12331 static int tg3_nway_reset(struct net_device *dev)
12332 {
12333         struct tg3 *tp = netdev_priv(dev);
12334         int r;
12335
12336         if (!netif_running(dev))
12337                 return -EAGAIN;
12338
12339         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12340                 return -EINVAL;
12341
12342         tg3_warn_mgmt_link_flap(tp);
12343
12344         if (tg3_flag(tp, USE_PHYLIB)) {
12345                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12346                         return -EAGAIN;
12347                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12348         } else {
12349                 u32 bmcr;
12350
12351                 spin_lock_bh(&tp->lock);
12352                 r = -EINVAL;
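                /* BMCR is read twice; the first result is discarded,
                 * presumably to flush a stale value before testing
                 * BMCR_ANENABLE below.
                 */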
12353                 tg3_readphy(tp, MII_BMCR, &bmcr);
12354                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12355                     ((bmcr & BMCR_ANENABLE) ||
12356                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12357                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12358                                                    BMCR_ANENABLE);
12359                         r = 0;
12360                 }
12361                 spin_unlock_bh(&tp->lock);
12362         }
12363
12364         return r;
12365 }
12366
12367 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12368 {
12369         struct tg3 *tp = netdev_priv(dev);
12370
12371         ering->rx_max_pending = tp->rx_std_ring_mask;
12372         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12373                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12374         else
12375                 ering->rx_jumbo_max_pending = 0;
12376
12377         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12378
12379         ering->rx_pending = tp->rx_pending;
12380         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12381                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12382         else
12383                 ering->rx_jumbo_pending = 0;
12384
12385         ering->tx_pending = tp->napi[0].tx_pending;
12386 }
12387
12388 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12389 {
12390         struct tg3 *tp = netdev_priv(dev);
12391         int i, irq_sync = 0, err = 0;
12392         bool reset_phy = false;
12393
12394         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12395             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12396             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12397             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12398             (tg3_flag(tp, TSO_BUG) &&
12399              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12400                 return -EINVAL;
12401
12402         if (netif_running(dev)) {
12403                 tg3_phy_stop(tp);
12404                 tg3_netif_stop(tp);
12405                 irq_sync = 1;
12406         }
12407
12408         tg3_full_lock(tp, irq_sync);
12409
12410         tp->rx_pending = ering->rx_pending;
12411
12412         if (tg3_flag(tp, MAX_RXPEND_64) &&
12413             tp->rx_pending > 63)
12414                 tp->rx_pending = 63;
12415
12416         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12417                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12418
12419         for (i = 0; i < tp->irq_max; i++)
12420                 tp->napi[i].tx_pending = ering->tx_pending;
12421
12422         if (netif_running(dev)) {
12423                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12424                 /* Reset PHY to avoid PHY lock up */
12425                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12426                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12427                     tg3_asic_rev(tp) == ASIC_REV_5720)
12428                         reset_phy = true;
12429
12430                 err = tg3_restart_hw(tp, reset_phy);
12431                 if (!err)
12432                         tg3_netif_start(tp);
12433         }
12434
12435         tg3_full_unlock(tp);
12436
12437         if (irq_sync && !err)
12438                 tg3_phy_start(tp);
12439
12440         return err;
12441 }
12442
12443 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12444 {
12445         struct tg3 *tp = netdev_priv(dev);
12446
12447         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12448
12449         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12450                 epause->rx_pause = 1;
12451         else
12452                 epause->rx_pause = 0;
12453
12454         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12455                 epause->tx_pause = 1;
12456         else
12457                 epause->tx_pause = 0;
12458 }
12459
12460 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12461 {
12462         struct tg3 *tp = netdev_priv(dev);
12463         int err = 0;
12464         bool reset_phy = false;
12465
12466         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12467                 tg3_warn_mgmt_link_flap(tp);
12468
12469         if (tg3_flag(tp, USE_PHYLIB)) {
12470                 u32 newadv;
12471                 struct phy_device *phydev;
12472
12473                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12474
12475                 if (!(phydev->supported & SUPPORTED_Pause) ||
12476                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12477                      (epause->rx_pause != epause->tx_pause)))
12478                         return -EINVAL;
12479
12480                 tp->link_config.flowctrl = 0;
12481                 if (epause->rx_pause) {
12482                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12483
12484                         if (epause->tx_pause) {
12485                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12486                                 newadv = ADVERTISED_Pause;
12487                         } else
12488                                 newadv = ADVERTISED_Pause |
12489                                          ADVERTISED_Asym_Pause;
12490                 } else if (epause->tx_pause) {
12491                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12492                         newadv = ADVERTISED_Asym_Pause;
12493                 } else
12494                         newadv = 0;
12495
12496                 if (epause->autoneg)
12497                         tg3_flag_set(tp, PAUSE_AUTONEG);
12498                 else
12499                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12500
12501                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12502                         u32 oldadv = phydev->advertising &
12503                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12504                         if (oldadv != newadv) {
12505                                 phydev->advertising &=
12506                                         ~(ADVERTISED_Pause |
12507                                           ADVERTISED_Asym_Pause);
12508                                 phydev->advertising |= newadv;
12509                                 if (phydev->autoneg) {
12510                                         /*
12511                                          * Always renegotiate the link to
12512                                          * inform our link partner of our
12513                                          * flow control settings, even if the
12514                                          * flow control is forced.  Let
12515                                          * tg3_adjust_link() do the final
12516                                          * flow control setup.
12517                                          */
12518                                         return phy_start_aneg(phydev);
12519                                 }
12520                         }
12521
12522                         if (!epause->autoneg)
12523                                 tg3_setup_flow_control(tp, 0, 0);
12524                 } else {
12525                         tp->link_config.advertising &=
12526                                         ~(ADVERTISED_Pause |
12527                                           ADVERTISED_Asym_Pause);
12528                         tp->link_config.advertising |= newadv;
12529                 }
12530         } else {
12531                 int irq_sync = 0;
12532
12533                 if (netif_running(dev)) {
12534                         tg3_netif_stop(tp);
12535                         irq_sync = 1;
12536                 }
12537
12538                 tg3_full_lock(tp, irq_sync);
12539
12540                 if (epause->autoneg)
12541                         tg3_flag_set(tp, PAUSE_AUTONEG);
12542                 else
12543                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12544                 if (epause->rx_pause)
12545                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12546                 else
12547                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12548                 if (epause->tx_pause)
12549                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12550                 else
12551                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12552
12553                 if (netif_running(dev)) {
12554                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12555                         /* Reset PHY to avoid PHY lock up */
12556                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12557                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12558                             tg3_asic_rev(tp) == ASIC_REV_5720)
12559                                 reset_phy = true;
12560
12561                         err = tg3_restart_hw(tp, reset_phy);
12562                         if (!err)
12563                                 tg3_netif_start(tp);
12564                 }
12565
12566                 tg3_full_unlock(tp);
12567         }
12568
12569         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12570
12571         return err;
12572 }
12573
12574 static int tg3_get_sset_count(struct net_device *dev, int sset)
12575 {
12576         switch (sset) {
12577         case ETH_SS_TEST:
12578                 return TG3_NUM_TEST;
12579         case ETH_SS_STATS:
12580                 return TG3_NUM_STATS;
12581         default:
12582                 return -EOPNOTSUPP;
12583         }
12584 }
12585
12586 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12587                          u32 *rules __always_unused)
12588 {
12589         struct tg3 *tp = netdev_priv(dev);
12590
12591         if (!tg3_flag(tp, SUPPORT_MSIX))
12592                 return -EOPNOTSUPP;
12593
12594         switch (info->cmd) {
12595         case ETHTOOL_GRXRINGS:
12596                 if (netif_running(tp->dev))
12597                         info->data = tp->rxq_cnt;
12598                 else {
12599                         info->data = num_online_cpus();
12600                         if (info->data > TG3_RSS_MAX_NUM_QS)
12601                                 info->data = TG3_RSS_MAX_NUM_QS;
12602                 }
12603
12604                 return 0;
12605
12606         default:
12607                 return -EOPNOTSUPP;
12608         }
12609 }
12610
12611 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12612 {
12613         u32 size = 0;
12614         struct tg3 *tp = netdev_priv(dev);
12615
12616         if (tg3_flag(tp, SUPPORT_MSIX))
12617                 size = TG3_RSS_INDIR_TBL_SIZE;
12618
12619         return size;
12620 }
12621
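/* RSS indirection table: entry i maps hash bucket i to an rx ring.
 * tg3 reports the Toeplitz hash function (ETH_RSS_HASH_TOP) and does
 * not expose a configurable hash key.
 */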
12622 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12623 {
12624         struct tg3 *tp = netdev_priv(dev);
12625         int i;
12626
12627         if (hfunc)
12628                 *hfunc = ETH_RSS_HASH_TOP;
12629         if (!indir)
12630                 return 0;
12631
12632         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12633                 indir[i] = tp->rss_ind_tbl[i];
12634
12635         return 0;
12636 }
12637
12638 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12639                         const u8 hfunc)
12640 {
12641         struct tg3 *tp = netdev_priv(dev);
12642         size_t i;
12643
12644         /* We require at least one supported parameter to be changed, and no
12645          * change to any unsupported parameter.
12646          */
12647         if (key ||
12648             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12649                 return -EOPNOTSUPP;
12650
12651         if (!indir)
12652                 return 0;
12653
12654         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12655                 tp->rss_ind_tbl[i] = indir[i];
12656
12657         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12658                 return 0;
12659
12660         /* It is legal to write the indirection
12661          * table while the device is running.
12662          */
12663         tg3_full_lock(tp, 0);
12664         tg3_rss_write_indir_tbl(tp);
12665         tg3_full_unlock(tp);
12666
12667         return 0;
12668 }
12669
12670 static void tg3_get_channels(struct net_device *dev,
12671                              struct ethtool_channels *channel)
12672 {
12673         struct tg3 *tp = netdev_priv(dev);
12674         u32 deflt_qs = netif_get_num_default_rss_queues();
12675
12676         channel->max_rx = tp->rxq_max;
12677         channel->max_tx = tp->txq_max;
12678
12679         if (netif_running(dev)) {
12680                 channel->rx_count = tp->rxq_cnt;
12681                 channel->tx_count = tp->txq_cnt;
12682         } else {
12683                 if (tp->rxq_req)
12684                         channel->rx_count = tp->rxq_req;
12685                 else
12686                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12687
12688                 if (tp->txq_req)
12689                         channel->tx_count = tp->txq_req;
12690                 else
12691                         channel->tx_count = min(deflt_qs, tp->txq_max);
12692         }
12693 }
12694
12695 static int tg3_set_channels(struct net_device *dev,
12696                             struct ethtool_channels *channel)
12697 {
12698         struct tg3 *tp = netdev_priv(dev);
12699
12700         if (!tg3_flag(tp, SUPPORT_MSIX))
12701                 return -EOPNOTSUPP;
12702
12703         if (channel->rx_count > tp->rxq_max ||
12704             channel->tx_count > tp->txq_max)
12705                 return -EINVAL;
12706
12707         tp->rxq_req = channel->rx_count;
12708         tp->txq_req = channel->tx_count;
12709
12710         if (!netif_running(dev))
12711                 return 0;
12712
12713         tg3_stop(tp);
12714
12715         tg3_carrier_off(tp);
12716
12717         tg3_start(tp, true, false, false);
12718
12719         return 0;
12720 }
12721
12722 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12723 {
12724         switch (stringset) {
12725         case ETH_SS_STATS:
12726                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12727                 break;
12728         case ETH_SS_TEST:
12729                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12730                 break;
12731         default:
12732                 WARN_ON(1);     /* unknown ethtool string set */
12733                 break;
12734         }
12735 }
12736
12737 static int tg3_set_phys_id(struct net_device *dev,
12738                             enum ethtool_phys_id_state state)
12739 {
12740         struct tg3 *tp = netdev_priv(dev);
12741
12742         if (!netif_running(tp->dev))
12743                 return -EAGAIN;
12744
12745         switch (state) {
12746         case ETHTOOL_ID_ACTIVE:
12747                 return 1;       /* cycle on/off once per second */
12748
12749         case ETHTOOL_ID_ON:
12750                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12751                      LED_CTRL_1000MBPS_ON |
12752                      LED_CTRL_100MBPS_ON |
12753                      LED_CTRL_10MBPS_ON |
12754                      LED_CTRL_TRAFFIC_OVERRIDE |
12755                      LED_CTRL_TRAFFIC_BLINK |
12756                      LED_CTRL_TRAFFIC_LED);
12757                 break;
12758
12759         case ETHTOOL_ID_OFF:
12760                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12761                      LED_CTRL_TRAFFIC_OVERRIDE);
12762                 break;
12763
12764         case ETHTOOL_ID_INACTIVE:
12765                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12766                 break;
12767         }
12768
12769         return 0;
12770 }
12771
12772 static void tg3_get_ethtool_stats(struct net_device *dev,
12773                                    struct ethtool_stats *estats, u64 *tmp_stats)
12774 {
12775         struct tg3 *tp = netdev_priv(dev);
12776
12777         if (tp->hw_stats)
12778                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12779         else
12780                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12781 }
12782
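/* Locate and read the VPD block.  EEPROM-format images may publish an
 * extended-VPD NVRAM directory entry; when none is found, fall back to
 * the fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window.  Non-EEPROM images
 * are read through the PCI VPD capability instead.
 */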
12783 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12784 {
12785         int i;
12786         __be32 *buf;
12787         u32 offset = 0, len = 0;
12788         u32 magic, val;
12789
12790         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12791                 return NULL;
12792
12793         if (magic == TG3_EEPROM_MAGIC) {
12794                 for (offset = TG3_NVM_DIR_START;
12795                      offset < TG3_NVM_DIR_END;
12796                      offset += TG3_NVM_DIRENT_SIZE) {
12797                         if (tg3_nvram_read(tp, offset, &val))
12798                                 return NULL;
12799
12800                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12801                             TG3_NVM_DIRTYPE_EXTVPD)
12802                                 break;
12803                 }
12804
12805                 if (offset != TG3_NVM_DIR_END) {
12806                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12807                         if (tg3_nvram_read(tp, offset + 4, &offset))
12808                                 return NULL;
12809
12810                         offset = tg3_nvram_logical_addr(tp, offset);
12811                 }
12812         }
12813
12814         if (!offset || !len) {
12815                 offset = TG3_NVM_VPD_OFF;
12816                 len = TG3_NVM_VPD_LEN;
12817         }
12818
12819         buf = kmalloc(len, GFP_KERNEL);
12820         if (buf == NULL)
12821                 return NULL;
12822
12823         if (magic == TG3_EEPROM_MAGIC) {
12824                 for (i = 0; i < len; i += 4) {
12825                         /* The data is in little-endian format in NVRAM.
12826                          * Use the big-endian read routines to preserve
12827                          * the byte order as it exists in NVRAM.
12828                          */
12829                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12830                                 goto error;
12831                 }
12832         } else {
12833                 u8 *ptr;
12834                 ssize_t cnt;
12835                 unsigned int pos = 0;
12836
12837                 ptr = (u8 *)&buf[0];
12838                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12839                         cnt = pci_read_vpd(tp->pdev, pos,
12840                                            len - pos, ptr);
12841                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12842                                 cnt = 0;
12843                         else if (cnt < 0)
12844                                 goto error;
12845                 }
12846                 if (pos != len)
12847                         goto error;
12848         }
12849
12850         *vpdlen = len;
12851
12852         return buf;
12853
12854 error:
12855         kfree(buf);
12856         return NULL;
12857 }
12858
12859 #define NVRAM_TEST_SIZE 0x100
12860 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12861 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12862 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12863 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12864 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12865 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12866 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12867 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12868
12869 static int tg3_test_nvram(struct tg3 *tp)
12870 {
12871         u32 csum, magic, len;
12872         __be32 *buf;
12873         int i, j, k, err = 0, size;
12874
12875         if (tg3_flag(tp, NO_NVRAM))
12876                 return 0;
12877
12878         if (tg3_nvram_read(tp, 0, &magic) != 0)
12879                 return -EIO;
12880
12881         if (magic == TG3_EEPROM_MAGIC)
12882                 size = NVRAM_TEST_SIZE;
12883         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12884                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12885                     TG3_EEPROM_SB_FORMAT_1) {
12886                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12887                         case TG3_EEPROM_SB_REVISION_0:
12888                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12889                                 break;
12890                         case TG3_EEPROM_SB_REVISION_2:
12891                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12892                                 break;
12893                         case TG3_EEPROM_SB_REVISION_3:
12894                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12895                                 break;
12896                         case TG3_EEPROM_SB_REVISION_4:
12897                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12898                                 break;
12899                         case TG3_EEPROM_SB_REVISION_5:
12900                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12901                                 break;
12902                         case TG3_EEPROM_SB_REVISION_6:
12903                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12904                                 break;
12905                         default:
12906                                 return -EIO;
12907                         }
12908                 } else
12909                         return 0;
12910         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12911                 size = NVRAM_SELFBOOT_HW_SIZE;
12912         else
12913                 return -EIO;
12914
12915         buf = kmalloc(size, GFP_KERNEL);
12916         if (buf == NULL)
12917                 return -ENOMEM;
12918
12919         err = -EIO;
12920         for (i = 0, j = 0; i < size; i += 4, j++) {
12921                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12922                 if (err)
12923                         break;
12924         }
12925         if (i < size)
12926                 goto out;
12927
12928         /* Selfboot format */
12929         magic = be32_to_cpu(buf[0]);
12930         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12931             TG3_EEPROM_MAGIC_FW) {
12932                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12933
12934                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12935                     TG3_EEPROM_SB_REVISION_2) {
12936                         /* For rev 2, the csum doesn't include the MBA. */
12937                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12938                                 csum8 += buf8[i];
12939                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12940                                 csum8 += buf8[i];
12941                 } else {
12942                         for (i = 0; i < size; i++)
12943                                 csum8 += buf8[i];
12944                 }
12945
12946                 if (csum8 == 0) {
12947                         err = 0;
12948                         goto out;
12949                 }
12950
12951                 err = -EIO;
12952                 goto out;
12953         }
12954
12955         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12956             TG3_EEPROM_MAGIC_HW) {
12957                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12958                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12959                 u8 *buf8 = (u8 *) buf;
12960
12961                 /* Separate the parity bits and the data bytes.  */
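                /* Layout, inferred from the loop below: bytes 0 and 8
                 * each hold 7 parity bits, byte 16 holds 6 and byte 17
                 * holds 8, giving 28 parity bits for the 28 data bytes
                 * of NVRAM_SELFBOOT_DATA_SIZE.  Each data byte plus its
                 * parity bit must have odd parity.
                 */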
12962                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12963                         if ((i == 0) || (i == 8)) {
12964                                 int l;
12965                                 u8 msk;
12966
12967                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12968                                         parity[k++] = buf8[i] & msk;
12969                                 i++;
12970                         } else if (i == 16) {
12971                                 int l;
12972                                 u8 msk;
12973
12974                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12975                                         parity[k++] = buf8[i] & msk;
12976                                 i++;
12977
12978                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12979                                         parity[k++] = buf8[i] & msk;
12980                                 i++;
12981                         }
12982                         data[j++] = buf8[i];
12983                 }
12984
12985                 err = -EIO;
12986                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12987                         u8 hw8 = hweight8(data[i]);
12988
12989                         if ((hw8 & 0x1) && parity[i])
12990                                 goto out;
12991                         else if (!(hw8 & 0x1) && !parity[i])
12992                                 goto out;
12993                 }
12994                 err = 0;
12995                 goto out;
12996         }
12997
12998         err = -EIO;
12999
13000         /* Bootstrap checksum at offset 0x10 */
13001         csum = calc_crc((unsigned char *) buf, 0x10);
13002         if (csum != le32_to_cpu(buf[0x10/4]))
13003                 goto out;
13004
13005         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13006         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13007         if (csum != le32_to_cpu(buf[0xfc/4]))
13008                 goto out;
13009
13010         kfree(buf);
13011
13012         buf = tg3_vpd_readblock(tp, &len);
13013         if (!buf)
13014                 return -ENOMEM;
13015
13016         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13017         if (i > 0) {
13018                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13019                 if (j < 0)
13020                         goto out;
13021
13022                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13023                         goto out;
13024
13025                 i += PCI_VPD_LRDT_TAG_SIZE;
13026                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13027                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13028                 if (j > 0) {
13029                         u8 csum8 = 0;
13030
13031                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13032
13033                         for (i = 0; i <= j; i++)
13034                                 csum8 += ((u8 *)buf)[i];
13035
13036                         if (csum8)
13037                                 goto out;
13038                 }
13039         }
13040
13041         err = 0;
13042
13043 out:
13044         kfree(buf);
13045         return err;
13046 }
13047
13048 #define TG3_SERDES_TIMEOUT_SEC  2
13049 #define TG3_COPPER_TIMEOUT_SEC  6
13050
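/* Poll link state once per second, giving SerDes links two seconds and
 * copper links six seconds to come up before reporting failure.
 */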
13051 static int tg3_test_link(struct tg3 *tp)
13052 {
13053         int i, max;
13054
13055         if (!netif_running(tp->dev))
13056                 return -ENODEV;
13057
13058         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13059                 max = TG3_SERDES_TIMEOUT_SEC;
13060         else
13061                 max = TG3_COPPER_TIMEOUT_SEC;
13062
13063         for (i = 0; i < max; i++) {
13064                 if (tp->link_up)
13065                         return 0;
13066
13067                 if (msleep_interruptible(1000))
13068                         break;
13069         }
13070
13071         return -EIO;
13072 }
13073
13074 /* Only test the commonly used registers */
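/* For each table entry below, read_mask selects the read-only bits
 * (their value must survive writes of all-zeros and all-ones) and
 * write_mask selects the read/write bits (which must latch both
 * patterns exactly).
 */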
13075 static int tg3_test_registers(struct tg3 *tp)
13076 {
13077         int i, is_5705, is_5750;
13078         u32 offset, read_mask, write_mask, val, save_val, read_val;
13079         static struct {
13080                 u16 offset;
13081                 u16 flags;
13082 #define TG3_FL_5705     0x1
13083 #define TG3_FL_NOT_5705 0x2
13084 #define TG3_FL_NOT_5788 0x4
13085 #define TG3_FL_NOT_5750 0x8
13086                 u32 read_mask;
13087                 u32 write_mask;
13088         } reg_tbl[] = {
13089                 /* MAC Control Registers */
13090                 { MAC_MODE, TG3_FL_NOT_5705,
13091                         0x00000000, 0x00ef6f8c },
13092                 { MAC_MODE, TG3_FL_5705,
13093                         0x00000000, 0x01ef6b8c },
13094                 { MAC_STATUS, TG3_FL_NOT_5705,
13095                         0x03800107, 0x00000000 },
13096                 { MAC_STATUS, TG3_FL_5705,
13097                         0x03800100, 0x00000000 },
13098                 { MAC_ADDR_0_HIGH, 0x0000,
13099                         0x00000000, 0x0000ffff },
13100                 { MAC_ADDR_0_LOW, 0x0000,
13101                         0x00000000, 0xffffffff },
13102                 { MAC_RX_MTU_SIZE, 0x0000,
13103                         0x00000000, 0x0000ffff },
13104                 { MAC_TX_MODE, 0x0000,
13105                         0x00000000, 0x00000070 },
13106                 { MAC_TX_LENGTHS, 0x0000,
13107                         0x00000000, 0x00003fff },
13108                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13109                         0x00000000, 0x000007fc },
13110                 { MAC_RX_MODE, TG3_FL_5705,
13111                         0x00000000, 0x000007dc },
13112                 { MAC_HASH_REG_0, 0x0000,
13113                         0x00000000, 0xffffffff },
13114                 { MAC_HASH_REG_1, 0x0000,
13115                         0x00000000, 0xffffffff },
13116                 { MAC_HASH_REG_2, 0x0000,
13117                         0x00000000, 0xffffffff },
13118                 { MAC_HASH_REG_3, 0x0000,
13119                         0x00000000, 0xffffffff },
13120
13121                 /* Receive Data and Receive BD Initiator Control Registers. */
13122                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13123                         0x00000000, 0xffffffff },
13124                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13125                         0x00000000, 0xffffffff },
13126                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13127                         0x00000000, 0x00000003 },
13128                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13129                         0x00000000, 0xffffffff },
13130                 { RCVDBDI_STD_BD+0, 0x0000,
13131                         0x00000000, 0xffffffff },
13132                 { RCVDBDI_STD_BD+4, 0x0000,
13133                         0x00000000, 0xffffffff },
13134                 { RCVDBDI_STD_BD+8, 0x0000,
13135                         0x00000000, 0xffff0002 },
13136                 { RCVDBDI_STD_BD+0xc, 0x0000,
13137                         0x00000000, 0xffffffff },
13138
13139                 /* Receive BD Initiator Control Registers. */
13140                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13141                         0x00000000, 0xffffffff },
13142                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13143                         0x00000000, 0x000003ff },
13144                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13145                         0x00000000, 0xffffffff },
13146
13147                 /* Host Coalescing Control Registers. */
13148                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13149                         0x00000000, 0x00000004 },
13150                 { HOSTCC_MODE, TG3_FL_5705,
13151                         0x00000000, 0x000000f6 },
13152                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13153                         0x00000000, 0xffffffff },
13154                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13155                         0x00000000, 0x000003ff },
13156                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13157                         0x00000000, 0xffffffff },
13158                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13159                         0x00000000, 0x000003ff },
13160                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13161                         0x00000000, 0xffffffff },
13162                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13163                         0x00000000, 0x000000ff },
13164                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13165                         0x00000000, 0xffffffff },
13166                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13167                         0x00000000, 0x000000ff },
13168                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13169                         0x00000000, 0xffffffff },
13170                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13171                         0x00000000, 0xffffffff },
13172                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13173                         0x00000000, 0xffffffff },
13174                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13175                         0x00000000, 0x000000ff },
13176                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13177                         0x00000000, 0xffffffff },
13178                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13179                         0x00000000, 0x000000ff },
13180                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13181                         0x00000000, 0xffffffff },
13182                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13183                         0x00000000, 0xffffffff },
13184                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13185                         0x00000000, 0xffffffff },
13186                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13187                         0x00000000, 0xffffffff },
13188                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13189                         0x00000000, 0xffffffff },
13190                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13191                         0xffffffff, 0x00000000 },
13192                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13193                         0xffffffff, 0x00000000 },
13194
13195                 /* Buffer Manager Control Registers. */
13196                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13197                         0x00000000, 0x007fff80 },
13198                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13199                         0x00000000, 0x007fffff },
13200                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13201                         0x00000000, 0x0000003f },
13202                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13203                         0x00000000, 0x000001ff },
13204                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13205                         0x00000000, 0x000001ff },
13206                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13207                         0xffffffff, 0x00000000 },
13208                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13209                         0xffffffff, 0x00000000 },
13210
13211                 /* Mailbox Registers */
13212                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13213                         0x00000000, 0x000001ff },
13214                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13215                         0x00000000, 0x000001ff },
13216                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13217                         0x00000000, 0x000007ff },
13218                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13219                         0x00000000, 0x000001ff },
13220
13221                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13222         };
13223
13224         is_5705 = is_5750 = 0;
13225         if (tg3_flag(tp, 5705_PLUS)) {
13226                 is_5705 = 1;
13227                 if (tg3_flag(tp, 5750_PLUS))
13228                         is_5750 = 1;
13229         }
13230
13231         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13232                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13233                         continue;
13234
13235                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13236                         continue;
13237
13238                 if (tg3_flag(tp, IS_5788) &&
13239                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13240                         continue;
13241
13242                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13243                         continue;
13244
13245                 offset = (u32) reg_tbl[i].offset;
13246                 read_mask = reg_tbl[i].read_mask;
13247                 write_mask = reg_tbl[i].write_mask;
13248
13249                 /* Save the original register content */
13250                 save_val = tr32(offset);
13251
13252                 /* Determine the read-only value. */
13253                 read_val = save_val & read_mask;
13254
13255                 /* Write zero to the register, then make sure the read-only bits
13256                  * are not changed and the read/write bits are all zeros.
13257                  */
13258                 tw32(offset, 0);
13259
13260                 val = tr32(offset);
13261
13262                 /* Test the read-only and read/write bits. */
13263                 if (((val & read_mask) != read_val) || (val & write_mask))
13264                         goto out;
13265
13266                 /* Write ones to all the bits defined by RdMask and WrMask, then
13267                  * make sure the read-only bits are not changed and the
13268                  * read/write bits are all ones.
13269                  */
13270                 tw32(offset, read_mask | write_mask);
13271
13272                 val = tr32(offset);
13273
13274                 /* Test the read-only bits. */
13275                 if ((val & read_mask) != read_val)
13276                         goto out;
13277
13278                 /* Test the read/write bits. */
13279                 if ((val & write_mask) != write_mask)
13280                         goto out;
13281
13282                 tw32(offset, save_val);
13283         }
13284
13285         return 0;
13286
13287 out:
13288         if (netif_msg_hw(tp))
13289                 netdev_err(tp->dev,
13290                            "Register test failed at offset %x\n", offset);
13291         tw32(offset, save_val);
13292         return -EIO;
13293 }
13294
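/* Write each of three patterns (all-zeros, all-ones, 0xaa55a55a) to
 * every 32-bit word in [offset, offset + len) and read each word back
 * through the memory window to verify it.
 */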
13295 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13296 {
13297         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13298         int i;
13299         u32 j;
13300
13301         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13302                 for (j = 0; j < len; j += 4) {
13303                         u32 val;
13304
13305                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13306                         tg3_read_mem(tp, offset + j, &val);
13307                         if (val != test_pattern[i])
13308                                 return -EIO;
13309                 }
13310         }
13311         return 0;
13312 }
13313
13314 static int tg3_test_memory(struct tg3 *tp)
13315 {
13316         static struct mem_entry {
13317                 u32 offset;
13318                 u32 len;
13319         } mem_tbl_570x[] = {
13320                 { 0x00000000, 0x00b50},
13321                 { 0x00002000, 0x1c000},
13322                 { 0xffffffff, 0x00000}
13323         }, mem_tbl_5705[] = {
13324                 { 0x00000100, 0x0000c},
13325                 { 0x00000200, 0x00008},
13326                 { 0x00004000, 0x00800},
13327                 { 0x00006000, 0x01000},
13328                 { 0x00008000, 0x02000},
13329                 { 0x00010000, 0x0e000},
13330                 { 0xffffffff, 0x00000}
13331         }, mem_tbl_5755[] = {
13332                 { 0x00000200, 0x00008},
13333                 { 0x00004000, 0x00800},
13334                 { 0x00006000, 0x00800},
13335                 { 0x00008000, 0x02000},
13336                 { 0x00010000, 0x0c000},
13337                 { 0xffffffff, 0x00000}
13338         }, mem_tbl_5906[] = {
13339                 { 0x00000200, 0x00008},
13340                 { 0x00004000, 0x00400},
13341                 { 0x00006000, 0x00400},
13342                 { 0x00008000, 0x01000},
13343                 { 0x00010000, 0x01000},
13344                 { 0xffffffff, 0x00000}
13345         }, mem_tbl_5717[] = {
13346                 { 0x00000200, 0x00008},
13347                 { 0x00010000, 0x0a000},
13348                 { 0x00020000, 0x13c00},
13349                 { 0xffffffff, 0x00000}
13350         }, mem_tbl_57765[] = {
13351                 { 0x00000200, 0x00008},
13352                 { 0x00004000, 0x00800},
13353                 { 0x00006000, 0x09800},
13354                 { 0x00010000, 0x0a000},
13355                 { 0xffffffff, 0x00000}
13356         };
13357         struct mem_entry *mem_tbl;
13358         int err = 0;
13359         int i;
13360
13361         if (tg3_flag(tp, 5717_PLUS))
13362                 mem_tbl = mem_tbl_5717;
13363         else if (tg3_flag(tp, 57765_CLASS) ||
13364                  tg3_asic_rev(tp) == ASIC_REV_5762)
13365                 mem_tbl = mem_tbl_57765;
13366         else if (tg3_flag(tp, 5755_PLUS))
13367                 mem_tbl = mem_tbl_5755;
13368         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13369                 mem_tbl = mem_tbl_5906;
13370         else if (tg3_flag(tp, 5705_PLUS))
13371                 mem_tbl = mem_tbl_5705;
13372         else
13373                 mem_tbl = mem_tbl_570x;
13374
13375         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13376                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13377                 if (err)
13378                         break;
13379         }
13380
13381         return err;
13382 }
13383
13384 #define TG3_TSO_MSS             500
13385
13386 #define TG3_TSO_IP_HDR_LEN      20
13387 #define TG3_TSO_TCP_HDR_LEN     20
13388 #define TG3_TSO_TCP_OPT_LEN     12
13389
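/* Canned loopback header: a 2-byte EtherType (0x0800) followed by a
 * 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2), a 20-byte TCP header and
 * 12 bytes of TCP timestamp options, matching the TG3_TSO_*_LEN sizes
 * above.  It is copied in right after the two MAC addresses.
 */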
13390 static const u8 tg3_tso_header[] = {
13391 0x08, 0x00,
13392 0x45, 0x00, 0x00, 0x00,
13393 0x00, 0x00, 0x40, 0x00,
13394 0x40, 0x06, 0x00, 0x00,
13395 0x0a, 0x00, 0x00, 0x01,
13396 0x0a, 0x00, 0x00, 0x02,
13397 0x0d, 0x00, 0xe0, 0x00,
13398 0x00, 0x00, 0x01, 0x00,
13399 0x00, 0x00, 0x02, 0x00,
13400 0x80, 0x10, 0x10, 0x00,
13401 0x14, 0x09, 0x00, 0x00,
13402 0x01, 0x01, 0x08, 0x0a,
13403 0x11, 0x11, 0x11, 0x11,
13404 0x11, 0x11, 0x11, 0x11,
13405 };
13406
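/* Transmit a single self-addressed frame (or a TSO burst when
 * tso_loopback is set) carrying a known byte pattern, poll the TX
 * consumer and RX producer indices until the hardware loops it back,
 * then verify the RX descriptor flags and every payload byte.
 */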
13407 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13408 {
13409         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13410         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13411         u32 budget;
13412         struct sk_buff *skb;
13413         u8 *tx_data, *rx_data;
13414         dma_addr_t map;
13415         int num_pkts, tx_len, rx_len, i, err;
13416         struct tg3_rx_buffer_desc *desc;
13417         struct tg3_napi *tnapi, *rnapi;
13418         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13419
13420         tnapi = &tp->napi[0];
13421         rnapi = &tp->napi[0];
13422         if (tp->irq_cnt > 1) {
13423                 if (tg3_flag(tp, ENABLE_RSS))
13424                         rnapi = &tp->napi[1];
13425                 if (tg3_flag(tp, ENABLE_TSS))
13426                         tnapi = &tp->napi[1];
13427         }
13428         coal_now = tnapi->coal_now | rnapi->coal_now;
13429
13430         err = -EIO;
13431
13432         tx_len = pktsz;
13433         skb = netdev_alloc_skb(tp->dev, tx_len);
13434         if (!skb)
13435                 return -ENOMEM;
13436
13437         tx_data = skb_put(skb, tx_len);
13438         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13439         memset(tx_data + ETH_ALEN, 0x0, 8);
13440
13441         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13442
13443         if (tso_loopback) {
13444                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13445
13446                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13447                               TG3_TSO_TCP_OPT_LEN;
13448
13449                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13450                        sizeof(tg3_tso_header));
13451                 mss = TG3_TSO_MSS;
13452
13453                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13454                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13455
13456                 /* Set the total length field in the IP header */
13457                 iph->tot_len = htons((u16)(mss + hdr_len));
13458
13459                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13460                               TXD_FLAG_CPU_POST_DMA);
13461
13462                 if (tg3_flag(tp, HW_TSO_1) ||
13463                     tg3_flag(tp, HW_TSO_2) ||
13464                     tg3_flag(tp, HW_TSO_3)) {
13465                         struct tcphdr *th;
13466                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13467                         th = (struct tcphdr *)&tx_data[val];
13468                         th->check = 0;
13469                 } else
13470                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13471
13472                 if (tg3_flag(tp, HW_TSO_3)) {
13473                         mss |= (hdr_len & 0xc) << 12;
13474                         if (hdr_len & 0x10)
13475                                 base_flags |= 0x00000010;
13476                         base_flags |= (hdr_len & 0x3e0) << 5;
13477                 } else if (tg3_flag(tp, HW_TSO_2))
13478                         mss |= hdr_len << 9;
13479                 else if (tg3_flag(tp, HW_TSO_1) ||
13480                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13481                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13482                 } else {
13483                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13484                 }
13485
13486                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13487         } else {
13488                 num_pkts = 1;
13489                 data_off = ETH_HLEN;
13490
13491                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13492                     tx_len > VLAN_ETH_FRAME_LEN)
13493                         base_flags |= TXD_FLAG_JMB_PKT;
13494         }
13495
13496         for (i = data_off; i < tx_len; i++)
13497                 tx_data[i] = (u8) (i & 0xff);
13498
13499         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13500         if (pci_dma_mapping_error(tp->pdev, map)) {
13501                 dev_kfree_skb(skb);
13502                 return -EIO;
13503         }
13504
13505         val = tnapi->tx_prod;
13506         tnapi->tx_buffers[val].skb = skb;
13507         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13508
13509         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13510                rnapi->coal_now);
13511
13512         udelay(10);
13513
13514         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13515
13516         budget = tg3_tx_avail(tnapi);
13517         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13518                             base_flags | TXD_FLAG_END, mss, 0)) {
13519                 tnapi->tx_buffers[val].skb = NULL;
13520                 dev_kfree_skb(skb);
13521                 return -EIO;
13522         }
13523
13524         tnapi->tx_prod++;
13525
13526         /* Sync BD data before updating mailbox */
13527         wmb();
13528
13529         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13530         tr32_mailbox(tnapi->prodmbox);
13531
13532         udelay(10);
13533
13534         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13535         for (i = 0; i < 35; i++) {
13536                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13537                        coal_now);
13538
13539                 udelay(10);
13540
13541                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13542                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13543                 if ((tx_idx == tnapi->tx_prod) &&
13544                     (rx_idx == (rx_start_idx + num_pkts)))
13545                         break;
13546         }
13547
13548         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13549         dev_kfree_skb(skb);
13550
13551         if (tx_idx != tnapi->tx_prod)
13552                 goto out;
13553
13554         if (rx_idx != rx_start_idx + num_pkts)
13555                 goto out;
13556
13557         val = data_off;
13558         while (rx_idx != rx_start_idx) {
13559                 desc = &rnapi->rx_rcb[rx_start_idx++];
13560                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13561                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13562
13563                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13564                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13565                         goto out;
13566
13567                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13568                          - ETH_FCS_LEN;
13569
13570                 if (!tso_loopback) {
13571                         if (rx_len != tx_len)
13572                                 goto out;
13573
13574                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13575                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13576                                         goto out;
13577                         } else {
13578                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13579                                         goto out;
13580                         }
13581                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13582                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13583                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13584                         goto out;
13585                 }
13586
13587                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13588                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13589                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13590                                              mapping);
13591                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13592                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13593                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13594                                              mapping);
13595                 } else
13596                         goto out;
13597
13598                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13599                                             PCI_DMA_FROMDEVICE);
13600
13601                 rx_data += TG3_RX_OFFSET(tp);
13602                 for (i = data_off; i < rx_len; i++, val++) {
13603                         if (*(rx_data + i) != (u8) (val & 0xff))
13604                                 goto out;
13605                 }
13606         }
13607
13608         err = 0;
13609
13610         /* tg3_free_rings will unmap and free the rx_data */
13611 out:
13612         return err;
13613 }
13614
13615 #define TG3_STD_LOOPBACK_FAILED         1
13616 #define TG3_JMB_LOOPBACK_FAILED         2
13617 #define TG3_TSO_LOOPBACK_FAILED         4
13618 #define TG3_LOOPBACK_FAILED \
13619         (TG3_STD_LOOPBACK_FAILED | \
13620          TG3_JMB_LOOPBACK_FAILED | \
13621          TG3_TSO_LOOPBACK_FAILED)
13622
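/* Run the MAC-, PHY- and (optionally) external-loopback passes at
 * standard, TSO and jumbo frame sizes, accumulating a
 * TG3_*_LOOPBACK_FAILED bit in the matching data[] slot for each
 * failing case.
 */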
13623 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13624 {
13625         int err = -EIO;
13626         u32 eee_cap;
13627         u32 jmb_pkt_sz = 9000;
13628
13629         if (tp->dma_limit)
13630                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13631
13632         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13633         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13634
13635         if (!netif_running(tp->dev)) {
13636                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13637                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13638                 if (do_extlpbk)
13639                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13640                 goto done;
13641         }
13642
13643         err = tg3_reset_hw(tp, true);
13644         if (err) {
13645                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13647                 if (do_extlpbk)
13648                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649                 goto done;
13650         }
13651
13652         if (tg3_flag(tp, ENABLE_RSS)) {
13653                 int i;
13654
13655                 /* Reroute all rx packets to the 1st queue */
13656                 for (i = MAC_RSS_INDIR_TBL_0;
13657                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13658                         tw32(i, 0x0);
13659         }
13660
13661         /* HW erratum - MAC loopback fails in some cases on 5780.
13662          * Normal traffic and PHY loopback are not affected by
13663          * this erratum.  The MAC loopback test is also deprecated
13664          * for all newer ASIC revisions.
13665          */
13666         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13667             !tg3_flag(tp, CPMU_PRESENT)) {
13668                 tg3_mac_loopback(tp, true);
13669
13670                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13671                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13672
13673                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13674                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13675                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13676
13677                 tg3_mac_loopback(tp, false);
13678         }
13679
13680         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13681             !tg3_flag(tp, USE_PHYLIB)) {
13682                 int i;
13683
13684                 tg3_phy_lpbk_set(tp, 0, false);
13685
13686                 /* Wait for link */
13687                 for (i = 0; i < 100; i++) {
13688                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13689                                 break;
13690                         mdelay(1);
13691                 }
13692
13693                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13694                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13695                 if (tg3_flag(tp, TSO_CAPABLE) &&
13696                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13697                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13698                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13699                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13700                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13701
13702                 if (do_extlpbk) {
13703                         tg3_phy_lpbk_set(tp, 0, true);
13704
13705                         /* All link indications report up, but the hardware
13706                          * isn't really ready for about 20 msec.  Double it
13707                          * to be sure.
13708                          */
13709                         mdelay(40);
13710
13711                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13712                                 data[TG3_EXT_LOOPB_TEST] |=
13713                                                         TG3_STD_LOOPBACK_FAILED;
13714                         if (tg3_flag(tp, TSO_CAPABLE) &&
13715                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13716                                 data[TG3_EXT_LOOPB_TEST] |=
13717                                                         TG3_TSO_LOOPBACK_FAILED;
13718                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13719                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13720                                 data[TG3_EXT_LOOPB_TEST] |=
13721                                                         TG3_JMB_LOOPBACK_FAILED;
13722                 }
13723
13724                 /* Re-enable gphy autopowerdown. */
13725                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13726                         tg3_phy_toggle_apd(tp, true);
13727         }
13728
13729         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13730                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13731
13732 done:
13733         tp->phy_flags |= eee_cap;
13734
13735         return err;
13736 }
13737
13738 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13739                           u64 *data)
13740 {
13741         struct tg3 *tp = netdev_priv(dev);
13742         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13743
13744         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13745                 if (tg3_power_up(tp)) {
13746                         etest->flags |= ETH_TEST_FL_FAILED;
13747                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13748                         return;
13749                 }
13750                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13751         }
13752
13753         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13754
13755         if (tg3_test_nvram(tp) != 0) {
13756                 etest->flags |= ETH_TEST_FL_FAILED;
13757                 data[TG3_NVRAM_TEST] = 1;
13758         }
13759         if (!doextlpbk && tg3_test_link(tp)) {
13760                 etest->flags |= ETH_TEST_FL_FAILED;
13761                 data[TG3_LINK_TEST] = 1;
13762         }
13763         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13764                 int err, err2 = 0, irq_sync = 0;
13765
13766                 if (netif_running(dev)) {
13767                         tg3_phy_stop(tp);
13768                         tg3_netif_stop(tp);
13769                         irq_sync = 1;
13770                 }
13771
13772                 tg3_full_lock(tp, irq_sync);
13773                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13774                 err = tg3_nvram_lock(tp);
13775                 tg3_halt_cpu(tp, RX_CPU_BASE);
13776                 if (!tg3_flag(tp, 5705_PLUS))
13777                         tg3_halt_cpu(tp, TX_CPU_BASE);
13778                 if (!err)
13779                         tg3_nvram_unlock(tp);
13780
13781                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13782                         tg3_phy_reset(tp);
13783
13784                 if (tg3_test_registers(tp) != 0) {
13785                         etest->flags |= ETH_TEST_FL_FAILED;
13786                         data[TG3_REGISTER_TEST] = 1;
13787                 }
13788
13789                 if (tg3_test_memory(tp) != 0) {
13790                         etest->flags |= ETH_TEST_FL_FAILED;
13791                         data[TG3_MEMORY_TEST] = 1;
13792                 }
13793
13794                 if (doextlpbk)
13795                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13796
13797                 if (tg3_test_loopback(tp, data, doextlpbk))
13798                         etest->flags |= ETH_TEST_FL_FAILED;
13799
13800                 tg3_full_unlock(tp);
13801
13802                 if (tg3_test_interrupt(tp) != 0) {
13803                         etest->flags |= ETH_TEST_FL_FAILED;
13804                         data[TG3_INTERRUPT_TEST] = 1;
13805                 }
13806
13807                 tg3_full_lock(tp, 0);
13808
13809                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13810                 if (netif_running(dev)) {
13811                         tg3_flag_set(tp, INIT_COMPLETE);
13812                         err2 = tg3_restart_hw(tp, true);
13813                         if (!err2)
13814                                 tg3_netif_start(tp);
13815                 }
13816
13817                 tg3_full_unlock(tp);
13818
13819                 if (irq_sync && !err2)
13820                         tg3_phy_start(tp);
13821         }
13822         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13823                 tg3_power_down_prepare(tp);
13824
13825 }
13826
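/* SIOCSHWTSTAMP handler: map the requested RX filter onto the
 * TG3_RX_PTP_CTL_* bits, program the live register when the interface
 * is up, and latch the TX timestamp enable flag.
 */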
13827 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13828 {
13829         struct tg3 *tp = netdev_priv(dev);
13830         struct hwtstamp_config stmpconf;
13831
13832         if (!tg3_flag(tp, PTP_CAPABLE))
13833                 return -EOPNOTSUPP;
13834
13835         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13836                 return -EFAULT;
13837
13838         if (stmpconf.flags)
13839                 return -EINVAL;
13840
13841         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13842             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13843                 return -ERANGE;
13844
13845         switch (stmpconf.rx_filter) {
13846         case HWTSTAMP_FILTER_NONE:
13847                 tp->rxptpctl = 0;
13848                 break;
13849         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13850                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13851                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13852                 break;
13853         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13854                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13855                                TG3_RX_PTP_CTL_SYNC_EVNT;
13856                 break;
13857         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13858                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13859                                TG3_RX_PTP_CTL_DELAY_REQ;
13860                 break;
13861         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13862                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13863                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13864                 break;
13865         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13866                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13867                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13868                 break;
13869         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13870                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13871                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13872                 break;
13873         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13874                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13875                                TG3_RX_PTP_CTL_SYNC_EVNT;
13876                 break;
13877         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13878                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13879                                TG3_RX_PTP_CTL_SYNC_EVNT;
13880                 break;
13881         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13882                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13883                                TG3_RX_PTP_CTL_SYNC_EVNT;
13884                 break;
13885         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13886                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13887                                TG3_RX_PTP_CTL_DELAY_REQ;
13888                 break;
13889         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13890                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13891                                TG3_RX_PTP_CTL_DELAY_REQ;
13892                 break;
13893         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13894                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13895                                TG3_RX_PTP_CTL_DELAY_REQ;
13896                 break;
13897         default:
13898                 return -ERANGE;
13899         }
13900
13901         if (netif_running(dev) && tp->rxptpctl)
13902                 tw32(TG3_RX_PTP_CTL,
13903                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13904
13905         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13906                 tg3_flag_set(tp, TX_TSTAMP_EN);
13907         else
13908                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13909
13910         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13911                 -EFAULT : 0;
13912 }
13913
13914 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13915 {
13916         struct tg3 *tp = netdev_priv(dev);
13917         struct hwtstamp_config stmpconf;
13918
13919         if (!tg3_flag(tp, PTP_CAPABLE))
13920                 return -EOPNOTSUPP;
13921
13922         stmpconf.flags = 0;
13923         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13924                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13925
13926         switch (tp->rxptpctl) {
13927         case 0:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13935                 break;
13936         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13937                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13938                 break;
13939         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13940                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13941                 break;
13942         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13943                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13944                 break;
13945         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13947                 break;
13948         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13959                 break;
13960         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13961                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13962                 break;
13963         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13964                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13965                 break;
13966         default:
13967                 WARN_ON_ONCE(1);
13968                 return -ERANGE;
13969         }
13970
13971         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13972                 -EFAULT : 0;
13973 }
13974
13975 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13976 {
13977         struct mii_ioctl_data *data = if_mii(ifr);
13978         struct tg3 *tp = netdev_priv(dev);
13979         int err;
13980
13981         if (tg3_flag(tp, USE_PHYLIB)) {
13982                 struct phy_device *phydev;
13983                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13984                         return -EAGAIN;
13985                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13986                 return phy_mii_ioctl(phydev, ifr, cmd);
13987         }
13988
13989         switch (cmd) {
13990         case SIOCGMIIPHY:
13991                 data->phy_id = tp->phy_addr;
13992
13993                 /* fallthru */
13994         case SIOCGMIIREG: {
13995                 u32 mii_regval;
13996
13997                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13998                         break;                  /* We have no PHY */
13999
14000                 if (!netif_running(dev))
14001                         return -EAGAIN;
14002
14003                 spin_lock_bh(&tp->lock);
14004                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14005                                     data->reg_num & 0x1f, &mii_regval);
14006                 spin_unlock_bh(&tp->lock);
14007
14008                 data->val_out = mii_regval;
14009
14010                 return err;
14011         }
14012
14013         case SIOCSMIIREG:
14014                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14015                         break;                  /* We have no PHY */
14016
14017                 if (!netif_running(dev))
14018                         return -EAGAIN;
14019
14020                 spin_lock_bh(&tp->lock);
14021                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14022                                      data->reg_num & 0x1f, data->val_in);
14023                 spin_unlock_bh(&tp->lock);
14024
14025                 return err;
14026
14027         case SIOCSHWTSTAMP:
14028                 return tg3_hwtstamp_set(dev, ifr);
14029
14030         case SIOCGHWTSTAMP:
14031                 return tg3_hwtstamp_get(dev, ifr);
14032
14033         default:
14034                 /* do nothing */
14035                 break;
14036         }
14037         return -EOPNOTSUPP;
14038 }
14039
14040 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14041 {
14042         struct tg3 *tp = netdev_priv(dev);
14043
14044         memcpy(ec, &tp->coal, sizeof(*ec));
14045         return 0;
14046 }
14047
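/* Validate the requested coalescing parameters against the hardware
 * limits before committing them.  The per-interrupt tick and
 * stats-block tick limits stay zero on 5705-plus parts, so nonzero
 * values for those fields are rejected there.
 */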
14048 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14049 {
14050         struct tg3 *tp = netdev_priv(dev);
14051         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14052         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14053
14054         if (!tg3_flag(tp, 5705_PLUS)) {
14055                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14056                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14057                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14058                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14059         }
14060
14061         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14062             (!ec->rx_coalesce_usecs) ||
14063             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14064             (!ec->tx_coalesce_usecs) ||
14065             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14066             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14067             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14068             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14069             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14070             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14071             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14072             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14073                 return -EINVAL;
14074
14075         /* Only copy relevant parameters, ignore all others. */
14076         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14077         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14078         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14079         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14080         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14081         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14082         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14083         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14084         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14085
14086         if (netif_running(dev)) {
14087                 tg3_full_lock(tp, 0);
14088                 __tg3_set_coalesce(tp, &tp->coal);
14089                 tg3_full_unlock(tp);
14090         }
14091         return 0;
14092 }
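/* For reference (illustrative, not a recommendation): the bounds checked
 * above are what a standard coalescing request is validated against when
 * it arrives via .set_coalesce, e.g. from the stock ethtool utility:
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * "eth0" and the numeric values here are placeholders.
 */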
14093
14094 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14095 {
14096         struct tg3 *tp = netdev_priv(dev);
14097
14098         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14099                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14100                 return -EOPNOTSUPP;
14101         }
14102
14103         if (edata->advertised != tp->eee.advertised) {
14104                 netdev_warn(tp->dev,
14105                             "Direct manipulation of EEE advertisement is not supported\n");
14106                 return -EINVAL;
14107         }
14108
14109         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14110                 netdev_warn(tp->dev,
14111                             "Maximal Tx LPI timer supported is %#x\n",
14112                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14113                 return -EINVAL;
14114         }
14115
14116         tp->eee = *edata;
14117
14118         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14119         tg3_warn_mgmt_link_flap(tp);
14120
14121         if (netif_running(tp->dev)) {
14122                 tg3_full_lock(tp, 0);
14123                 tg3_setup_eee(tp);
14124                 tg3_phy_reset(tp);
14125                 tg3_full_unlock(tp);
14126         }
14127
14128         return 0;
14129 }
14130
14131 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14132 {
14133         struct tg3 *tp = netdev_priv(dev);
14134
14135         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14136                 netdev_warn(tp->dev,
14137                             "Board does not support EEE!\n");
14138                 return -EOPNOTSUPP;
14139         }
14140
14141         *edata = tp->eee;
14142         return 0;
14143 }
14144
14145 static const struct ethtool_ops tg3_ethtool_ops = {
14146         .get_drvinfo            = tg3_get_drvinfo,
14147         .get_regs_len           = tg3_get_regs_len,
14148         .get_regs               = tg3_get_regs,
14149         .get_wol                = tg3_get_wol,
14150         .set_wol                = tg3_set_wol,
14151         .get_msglevel           = tg3_get_msglevel,
14152         .set_msglevel           = tg3_set_msglevel,
14153         .nway_reset             = tg3_nway_reset,
14154         .get_link               = ethtool_op_get_link,
14155         .get_eeprom_len         = tg3_get_eeprom_len,
14156         .get_eeprom             = tg3_get_eeprom,
14157         .set_eeprom             = tg3_set_eeprom,
14158         .get_ringparam          = tg3_get_ringparam,
14159         .set_ringparam          = tg3_set_ringparam,
14160         .get_pauseparam         = tg3_get_pauseparam,
14161         .set_pauseparam         = tg3_set_pauseparam,
14162         .self_test              = tg3_self_test,
14163         .get_strings            = tg3_get_strings,
14164         .set_phys_id            = tg3_set_phys_id,
14165         .get_ethtool_stats      = tg3_get_ethtool_stats,
14166         .get_coalesce           = tg3_get_coalesce,
14167         .set_coalesce           = tg3_set_coalesce,
14168         .get_sset_count         = tg3_get_sset_count,
14169         .get_rxnfc              = tg3_get_rxnfc,
14170         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14171         .get_rxfh               = tg3_get_rxfh,
14172         .set_rxfh               = tg3_set_rxfh,
14173         .get_channels           = tg3_get_channels,
14174         .set_channels           = tg3_set_channels,
14175         .get_ts_info            = tg3_get_ts_info,
14176         .get_eee                = tg3_get_eee,
14177         .set_eee                = tg3_set_eee,
14178         .get_link_ksettings     = tg3_get_link_ksettings,
14179         .set_link_ksettings     = tg3_set_link_ksettings,
14180 };
14181
14182 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14183                                                 struct rtnl_link_stats64 *stats)
14184 {
14185         struct tg3 *tp = netdev_priv(dev);
14186
14187         spin_lock_bh(&tp->lock);
14188         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14189                 *stats = tp->net_stats_prev;
14190                 spin_unlock_bh(&tp->lock);
14191                 return stats;
14192         }
14193
14194         tg3_get_nstats(tp, stats);
14195         spin_unlock_bh(&tp->lock);
14196
14197         return stats;
14198 }
14199
14200 static void tg3_set_rx_mode(struct net_device *dev)
14201 {
14202         struct tg3 *tp = netdev_priv(dev);
14203
14204         if (!netif_running(dev))
14205                 return;
14206
14207         tg3_full_lock(tp, 0);
14208         __tg3_set_rx_mode(dev);
14209         tg3_full_unlock(tp);
14210 }
14211
14212 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14213                                int new_mtu)
14214 {
14215         dev->mtu = new_mtu;
14216
14217         if (new_mtu > ETH_DATA_LEN) {
14218                 if (tg3_flag(tp, 5780_CLASS)) {
14219                         netdev_update_features(dev);
14220                         tg3_flag_clear(tp, TSO_CAPABLE);
14221                 } else {
14222                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14223                 }
14224         } else {
14225                 if (tg3_flag(tp, 5780_CLASS)) {
14226                         tg3_flag_set(tp, TSO_CAPABLE);
14227                         netdev_update_features(dev);
14228                 }
14229                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14230         }
14231 }
14232
14233 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14234 {
14235         struct tg3 *tp = netdev_priv(dev);
14236         int err;
14237         bool reset_phy = false;
14238
14239         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14240                 return -EINVAL;
14241
14242         if (!netif_running(dev)) {
14243                 /* We'll just catch it later when the
14244                  * device is brought up.
14245                  */
14246                 tg3_set_mtu(dev, tp, new_mtu);
14247                 return 0;
14248         }
14249
14250         tg3_phy_stop(tp);
14251
14252         tg3_netif_stop(tp);
14253
14254         tg3_set_mtu(dev, tp, new_mtu);
14255
14256         tg3_full_lock(tp, 1);
14257
14258         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14259
14260         /* Reset the PHY; otherwise the read DMA engine will be left in a
14261          * mode that breaks all read requests down to 256 bytes.
14262          */
14263         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14264             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14265             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14266             tg3_asic_rev(tp) == ASIC_REV_5720)
14267                 reset_phy = true;
14268
14269         err = tg3_restart_hw(tp, reset_phy);
14270
14271         if (!err)
14272                 tg3_netif_start(tp);
14273
14274         tg3_full_unlock(tp);
14275
14276         if (!err)
14277                 tg3_phy_start(tp);
14278
14279         return err;
14280 }
14281
14282 static const struct net_device_ops tg3_netdev_ops = {
14283         .ndo_open               = tg3_open,
14284         .ndo_stop               = tg3_close,
14285         .ndo_start_xmit         = tg3_start_xmit,
14286         .ndo_get_stats64        = tg3_get_stats64,
14287         .ndo_validate_addr      = eth_validate_addr,
14288         .ndo_set_rx_mode        = tg3_set_rx_mode,
14289         .ndo_set_mac_address    = tg3_set_mac_addr,
14290         .ndo_do_ioctl           = tg3_ioctl,
14291         .ndo_tx_timeout         = tg3_tx_timeout,
14292         .ndo_change_mtu         = tg3_change_mtu,
14293         .ndo_fix_features       = tg3_fix_features,
14294         .ndo_set_features       = tg3_set_features,
14295 #ifdef CONFIG_NET_POLL_CONTROLLER
14296         .ndo_poll_controller    = tg3_poll_controller,
14297 #endif
14298 };
14299
14300 static void tg3_get_eeprom_size(struct tg3 *tp)
14301 {
14302         u32 cursize, val, magic;
14303
14304         tp->nvram_size = EEPROM_CHIP_SIZE;
14305
14306         if (tg3_nvram_read(tp, 0, &magic) != 0)
14307                 return;
14308
14309         if ((magic != TG3_EEPROM_MAGIC) &&
14310             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14311             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14312                 return;
14313
14314         /*
14315          * Size the chip by reading offsets at increasing powers of two.
14316          * When we encounter our validation signature, we know the addressing
14317          * has wrapped around, and thus have our chip size.
14318          */
14319         cursize = 0x10;
14320
14321         while (cursize < tp->nvram_size) {
14322                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14323                         return;
14324
14325                 if (val == magic)
14326                         break;
14327
14328                 cursize <<= 1;
14329         }
14330
14331         tp->nvram_size = cursize;
14332 }
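/* Worked example of the wraparound probe above (sizes are illustrative):
 * on a 512-byte part, reads at 0x10, 0x20, ... 0x100 return ordinary
 * data, but the read at offset 0x200 wraps back to offset 0 and returns
 * the magic signature, so cursize (0x200, i.e. 512 bytes) becomes the
 * detected chip size.
 */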
14333
14334 static void tg3_get_nvram_size(struct tg3 *tp)
14335 {
14336         u32 val;
14337
14338         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14339                 return;
14340
14341         /* Selfboot format */
14342         if (val != TG3_EEPROM_MAGIC) {
14343                 tg3_get_eeprom_size(tp);
14344                 return;
14345         }
14346
14347         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14348                 if (val != 0) {
14349                         /* This is confusing.  We want to operate on the
14350                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14351                          * call will read from NVRAM and byteswap the data
14352                          * according to the byteswapping settings for all
14353                          * other register accesses.  This ensures the data we
14354                          * want will always reside in the lower 16-bits.
14355                          * However, the data in NVRAM is in LE format, which
14356                          * means the data from the NVRAM read will always be
14357                          * opposite the endianness of the CPU.  The 16-bit
14358                          * byteswap then brings the data to CPU endianness.
14359                          */
14360                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14361                         return;
14362                 }
14363         }
14364         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14365 }
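/* Worked example of the swab16() logic above (values are illustrative):
 * if the 16-bit size word stored at offset 0xf2 is 512 (0x0200), the
 * register-byteswapped read yields 0x0002 in the low 16 bits of 'val';
 * swab16() recovers 0x0200, giving a reported nvram_size of
 * 512 * 1024 bytes.
 */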
14366
14367 static void tg3_get_nvram_info(struct tg3 *tp)
14368 {
14369         u32 nvcfg1;
14370
14371         nvcfg1 = tr32(NVRAM_CFG1);
14372         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14373                 tg3_flag_set(tp, FLASH);
14374         } else {
14375                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14376                 tw32(NVRAM_CFG1, nvcfg1);
14377         }
14378
14379         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14380             tg3_flag(tp, 5780_CLASS)) {
14381                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14382                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14383                         tp->nvram_jedecnum = JEDEC_ATMEL;
14384                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14385                         tg3_flag_set(tp, NVRAM_BUFFERED);
14386                         break;
14387                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14388                         tp->nvram_jedecnum = JEDEC_ATMEL;
14389                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14390                         break;
14391                 case FLASH_VENDOR_ATMEL_EEPROM:
14392                         tp->nvram_jedecnum = JEDEC_ATMEL;
14393                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14394                         tg3_flag_set(tp, NVRAM_BUFFERED);
14395                         break;
14396                 case FLASH_VENDOR_ST:
14397                         tp->nvram_jedecnum = JEDEC_ST;
14398                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14399                         tg3_flag_set(tp, NVRAM_BUFFERED);
14400                         break;
14401                 case FLASH_VENDOR_SAIFUN:
14402                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14403                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14404                         break;
14405                 case FLASH_VENDOR_SST_SMALL:
14406                 case FLASH_VENDOR_SST_LARGE:
14407                         tp->nvram_jedecnum = JEDEC_SST;
14408                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14409                         break;
14410                 }
14411         } else {
14412                 tp->nvram_jedecnum = JEDEC_ATMEL;
14413                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14414                 tg3_flag_set(tp, NVRAM_BUFFERED);
14415         }
14416 }
14417
14418 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14419 {
14420         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14421         case FLASH_5752PAGE_SIZE_256:
14422                 tp->nvram_pagesize = 256;
14423                 break;
14424         case FLASH_5752PAGE_SIZE_512:
14425                 tp->nvram_pagesize = 512;
14426                 break;
14427         case FLASH_5752PAGE_SIZE_1K:
14428                 tp->nvram_pagesize = 1024;
14429                 break;
14430         case FLASH_5752PAGE_SIZE_2K:
14431                 tp->nvram_pagesize = 2048;
14432                 break;
14433         case FLASH_5752PAGE_SIZE_4K:
14434                 tp->nvram_pagesize = 4096;
14435                 break;
14436         case FLASH_5752PAGE_SIZE_264:
14437                 tp->nvram_pagesize = 264;
14438                 break;
14439         case FLASH_5752PAGE_SIZE_528:
14440                 tp->nvram_pagesize = 528;
14441                 break;
14442         }
14443 }
14444
14445 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14446 {
14447         u32 nvcfg1;
14448
14449         nvcfg1 = tr32(NVRAM_CFG1);
14450
14451         /* NVRAM protection for TPM */
14452         if (nvcfg1 & (1 << 27))
14453                 tg3_flag_set(tp, PROTECTED_NVRAM);
14454
14455         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14456         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14457         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14458                 tp->nvram_jedecnum = JEDEC_ATMEL;
14459                 tg3_flag_set(tp, NVRAM_BUFFERED);
14460                 break;
14461         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14462                 tp->nvram_jedecnum = JEDEC_ATMEL;
14463                 tg3_flag_set(tp, NVRAM_BUFFERED);
14464                 tg3_flag_set(tp, FLASH);
14465                 break;
14466         case FLASH_5752VENDOR_ST_M45PE10:
14467         case FLASH_5752VENDOR_ST_M45PE20:
14468         case FLASH_5752VENDOR_ST_M45PE40:
14469                 tp->nvram_jedecnum = JEDEC_ST;
14470                 tg3_flag_set(tp, NVRAM_BUFFERED);
14471                 tg3_flag_set(tp, FLASH);
14472                 break;
14473         }
14474
14475         if (tg3_flag(tp, FLASH)) {
14476                 tg3_nvram_get_pagesize(tp, nvcfg1);
14477         } else {
14478                 /* For EEPROM, set pagesize to the maximum EEPROM size */
14479                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14480
14481                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14482                 tw32(NVRAM_CFG1, nvcfg1);
14483         }
14484 }
14485
14486 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14487 {
14488         u32 nvcfg1, protect = 0;
14489
14490         nvcfg1 = tr32(NVRAM_CFG1);
14491
14492         /* NVRAM protection for TPM */
14493         if (nvcfg1 & (1 << 27)) {
14494                 tg3_flag_set(tp, PROTECTED_NVRAM);
14495                 protect = 1;
14496         }
14497
14498         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14499         switch (nvcfg1) {
14500         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14501         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14502         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14503         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14504                 tp->nvram_jedecnum = JEDEC_ATMEL;
14505                 tg3_flag_set(tp, NVRAM_BUFFERED);
14506                 tg3_flag_set(tp, FLASH);
14507                 tp->nvram_pagesize = 264;
14508                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14509                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14510                         tp->nvram_size = (protect ? 0x3e200 :
14511                                           TG3_NVRAM_SIZE_512KB);
14512                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14513                         tp->nvram_size = (protect ? 0x1f200 :
14514                                           TG3_NVRAM_SIZE_256KB);
14515                 else
14516                         tp->nvram_size = (protect ? 0x1f200 :
14517                                           TG3_NVRAM_SIZE_128KB);
14518                 break;
14519         case FLASH_5752VENDOR_ST_M45PE10:
14520         case FLASH_5752VENDOR_ST_M45PE20:
14521         case FLASH_5752VENDOR_ST_M45PE40:
14522                 tp->nvram_jedecnum = JEDEC_ST;
14523                 tg3_flag_set(tp, NVRAM_BUFFERED);
14524                 tg3_flag_set(tp, FLASH);
14525                 tp->nvram_pagesize = 256;
14526                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14527                         tp->nvram_size = (protect ?
14528                                           TG3_NVRAM_SIZE_64KB :
14529                                           TG3_NVRAM_SIZE_128KB);
14530                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14531                         tp->nvram_size = (protect ?
14532                                           TG3_NVRAM_SIZE_64KB :
14533                                           TG3_NVRAM_SIZE_256KB);
14534                 else
14535                         tp->nvram_size = (protect ?
14536                                           TG3_NVRAM_SIZE_128KB :
14537                                           TG3_NVRAM_SIZE_512KB);
14538                 break;
14539         }
14540 }
14541
14542 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14543 {
14544         u32 nvcfg1;
14545
14546         nvcfg1 = tr32(NVRAM_CFG1);
14547
14548         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14549         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14550         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14551         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14552         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14553                 tp->nvram_jedecnum = JEDEC_ATMEL;
14554                 tg3_flag_set(tp, NVRAM_BUFFERED);
14555                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14556
14557                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14558                 tw32(NVRAM_CFG1, nvcfg1);
14559                 break;
14560         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14561         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14562         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14563         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14564                 tp->nvram_jedecnum = JEDEC_ATMEL;
14565                 tg3_flag_set(tp, NVRAM_BUFFERED);
14566                 tg3_flag_set(tp, FLASH);
14567                 tp->nvram_pagesize = 264;
14568                 break;
14569         case FLASH_5752VENDOR_ST_M45PE10:
14570         case FLASH_5752VENDOR_ST_M45PE20:
14571         case FLASH_5752VENDOR_ST_M45PE40:
14572                 tp->nvram_jedecnum = JEDEC_ST;
14573                 tg3_flag_set(tp, NVRAM_BUFFERED);
14574                 tg3_flag_set(tp, FLASH);
14575                 tp->nvram_pagesize = 256;
14576                 break;
14577         }
14578 }
14579
14580 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14581 {
14582         u32 nvcfg1, protect = 0;
14583
14584         nvcfg1 = tr32(NVRAM_CFG1);
14585
14586         /* NVRAM protection for TPM */
14587         if (nvcfg1 & (1 << 27)) {
14588                 tg3_flag_set(tp, PROTECTED_NVRAM);
14589                 protect = 1;
14590         }
14591
14592         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14593         switch (nvcfg1) {
14594         case FLASH_5761VENDOR_ATMEL_ADB021D:
14595         case FLASH_5761VENDOR_ATMEL_ADB041D:
14596         case FLASH_5761VENDOR_ATMEL_ADB081D:
14597         case FLASH_5761VENDOR_ATMEL_ADB161D:
14598         case FLASH_5761VENDOR_ATMEL_MDB021D:
14599         case FLASH_5761VENDOR_ATMEL_MDB041D:
14600         case FLASH_5761VENDOR_ATMEL_MDB081D:
14601         case FLASH_5761VENDOR_ATMEL_MDB161D:
14602                 tp->nvram_jedecnum = JEDEC_ATMEL;
14603                 tg3_flag_set(tp, NVRAM_BUFFERED);
14604                 tg3_flag_set(tp, FLASH);
14605                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14606                 tp->nvram_pagesize = 256;
14607                 break;
14608         case FLASH_5761VENDOR_ST_A_M45PE20:
14609         case FLASH_5761VENDOR_ST_A_M45PE40:
14610         case FLASH_5761VENDOR_ST_A_M45PE80:
14611         case FLASH_5761VENDOR_ST_A_M45PE16:
14612         case FLASH_5761VENDOR_ST_M_M45PE20:
14613         case FLASH_5761VENDOR_ST_M_M45PE40:
14614         case FLASH_5761VENDOR_ST_M_M45PE80:
14615         case FLASH_5761VENDOR_ST_M_M45PE16:
14616                 tp->nvram_jedecnum = JEDEC_ST;
14617                 tg3_flag_set(tp, NVRAM_BUFFERED);
14618                 tg3_flag_set(tp, FLASH);
14619                 tp->nvram_pagesize = 256;
14620                 break;
14621         }
14622
14623         if (protect) {
14624                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14625         } else {
14626                 switch (nvcfg1) {
14627                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14628                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14629                 case FLASH_5761VENDOR_ST_A_M45PE16:
14630                 case FLASH_5761VENDOR_ST_M_M45PE16:
14631                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14632                         break;
14633                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14634                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14635                 case FLASH_5761VENDOR_ST_A_M45PE80:
14636                 case FLASH_5761VENDOR_ST_M_M45PE80:
14637                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14638                         break;
14639                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14640                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14641                 case FLASH_5761VENDOR_ST_A_M45PE40:
14642                 case FLASH_5761VENDOR_ST_M_M45PE40:
14643                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14644                         break;
14645                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14646                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14647                 case FLASH_5761VENDOR_ST_A_M45PE20:
14648                 case FLASH_5761VENDOR_ST_M_M45PE20:
14649                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14650                         break;
14651                 }
14652         }
14653 }
14654
14655 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14656 {
14657         tp->nvram_jedecnum = JEDEC_ATMEL;
14658         tg3_flag_set(tp, NVRAM_BUFFERED);
14659         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14660 }
14661
14662 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14663 {
14664         u32 nvcfg1;
14665
14666         nvcfg1 = tr32(NVRAM_CFG1);
14667
14668         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14669         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14670         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14671                 tp->nvram_jedecnum = JEDEC_ATMEL;
14672                 tg3_flag_set(tp, NVRAM_BUFFERED);
14673                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14674
14675                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14676                 tw32(NVRAM_CFG1, nvcfg1);
14677                 return;
14678         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14679         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14680         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14681         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14682         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14683         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14684         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14685                 tp->nvram_jedecnum = JEDEC_ATMEL;
14686                 tg3_flag_set(tp, NVRAM_BUFFERED);
14687                 tg3_flag_set(tp, FLASH);
14688
14689                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14690                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14691                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14692                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14693                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14694                         break;
14695                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14696                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14697                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14698                         break;
14699                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14700                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14701                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14702                         break;
14703                 }
14704                 break;
14705         case FLASH_5752VENDOR_ST_M45PE10:
14706         case FLASH_5752VENDOR_ST_M45PE20:
14707         case FLASH_5752VENDOR_ST_M45PE40:
14708                 tp->nvram_jedecnum = JEDEC_ST;
14709                 tg3_flag_set(tp, NVRAM_BUFFERED);
14710                 tg3_flag_set(tp, FLASH);
14711
14712                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14713                 case FLASH_5752VENDOR_ST_M45PE10:
14714                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14715                         break;
14716                 case FLASH_5752VENDOR_ST_M45PE20:
14717                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14718                         break;
14719                 case FLASH_5752VENDOR_ST_M45PE40:
14720                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14721                         break;
14722                 }
14723                 break;
14724         default:
14725                 tg3_flag_set(tp, NO_NVRAM);
14726                 return;
14727         }
14728
14729         tg3_nvram_get_pagesize(tp, nvcfg1);
14730         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14731                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14732 }
14733
14734
14735 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14736 {
14737         u32 nvcfg1;
14738
14739         nvcfg1 = tr32(NVRAM_CFG1);
14740
14741         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14742         case FLASH_5717VENDOR_ATMEL_EEPROM:
14743         case FLASH_5717VENDOR_MICRO_EEPROM:
14744                 tp->nvram_jedecnum = JEDEC_ATMEL;
14745                 tg3_flag_set(tp, NVRAM_BUFFERED);
14746                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14747
14748                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14749                 tw32(NVRAM_CFG1, nvcfg1);
14750                 return;
14751         case FLASH_5717VENDOR_ATMEL_MDB011D:
14752         case FLASH_5717VENDOR_ATMEL_ADB011B:
14753         case FLASH_5717VENDOR_ATMEL_ADB011D:
14754         case FLASH_5717VENDOR_ATMEL_MDB021D:
14755         case FLASH_5717VENDOR_ATMEL_ADB021B:
14756         case FLASH_5717VENDOR_ATMEL_ADB021D:
14757         case FLASH_5717VENDOR_ATMEL_45USPT:
14758                 tp->nvram_jedecnum = JEDEC_ATMEL;
14759                 tg3_flag_set(tp, NVRAM_BUFFERED);
14760                 tg3_flag_set(tp, FLASH);
14761
14762                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14763                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14764                         /* Detect size with tg3_get_nvram_size() */
14765                         break;
14766                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14767                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14768                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14769                         break;
14770                 default:
14771                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14772                         break;
14773                 }
14774                 break;
14775         case FLASH_5717VENDOR_ST_M_M25PE10:
14776         case FLASH_5717VENDOR_ST_A_M25PE10:
14777         case FLASH_5717VENDOR_ST_M_M45PE10:
14778         case FLASH_5717VENDOR_ST_A_M45PE10:
14779         case FLASH_5717VENDOR_ST_M_M25PE20:
14780         case FLASH_5717VENDOR_ST_A_M25PE20:
14781         case FLASH_5717VENDOR_ST_M_M45PE20:
14782         case FLASH_5717VENDOR_ST_A_M45PE20:
14783         case FLASH_5717VENDOR_ST_25USPT:
14784         case FLASH_5717VENDOR_ST_45USPT:
14785                 tp->nvram_jedecnum = JEDEC_ST;
14786                 tg3_flag_set(tp, NVRAM_BUFFERED);
14787                 tg3_flag_set(tp, FLASH);
14788
14789                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14790                 case FLASH_5717VENDOR_ST_M_M25PE20:
14791                 case FLASH_5717VENDOR_ST_M_M45PE20:
14792                         /* Detect size with tg3_get_nvram_size() */
14793                         break;
14794                 case FLASH_5717VENDOR_ST_A_M25PE20:
14795                 case FLASH_5717VENDOR_ST_A_M45PE20:
14796                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14797                         break;
14798                 default:
14799                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14800                         break;
14801                 }
14802                 break;
14803         default:
14804                 tg3_flag_set(tp, NO_NVRAM);
14805                 return;
14806         }
14807
14808         tg3_nvram_get_pagesize(tp, nvcfg1);
14809         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14810                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14811 }
14812
14813 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14814 {
14815         u32 nvcfg1, nvmpinstrp;
14816
14817         nvcfg1 = tr32(NVRAM_CFG1);
14818         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14819
14820         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14821                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14822                         tg3_flag_set(tp, NO_NVRAM);
14823                         return;
14824                 }
14825
14826                 switch (nvmpinstrp) {
14827                 case FLASH_5762_EEPROM_HD:
14828                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14829                         break;
14830                 case FLASH_5762_EEPROM_LD:
14831                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14832                         break;
14833                 case FLASH_5720VENDOR_M_ST_M45PE20:
14834                         /* This pinstrap supports multiple sizes, so force it
14835                          * to read the actual size from location 0xf0.
14836                          */
14837                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14838                         break;
14839                 }
14840         }
14841
14842         switch (nvmpinstrp) {
14843         case FLASH_5720_EEPROM_HD:
14844         case FLASH_5720_EEPROM_LD:
14845                 tp->nvram_jedecnum = JEDEC_ATMEL;
14846                 tg3_flag_set(tp, NVRAM_BUFFERED);
14847
14848                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14849                 tw32(NVRAM_CFG1, nvcfg1);
14850                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14851                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14852                 else
14853                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14854                 return;
14855         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14856         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14857         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14858         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14859         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14860         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14861         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14862         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14863         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14864         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14865         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14866         case FLASH_5720VENDOR_ATMEL_45USPT:
14867                 tp->nvram_jedecnum = JEDEC_ATMEL;
14868                 tg3_flag_set(tp, NVRAM_BUFFERED);
14869                 tg3_flag_set(tp, FLASH);
14870
14871                 switch (nvmpinstrp) {
14872                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14873                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14874                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14875                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14876                         break;
14877                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14878                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14879                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14880                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14881                         break;
14882                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14883                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14884                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14885                         break;
14886                 default:
14887                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14888                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14889                         break;
14890                 }
14891                 break;
14892         case FLASH_5720VENDOR_M_ST_M25PE10:
14893         case FLASH_5720VENDOR_M_ST_M45PE10:
14894         case FLASH_5720VENDOR_A_ST_M25PE10:
14895         case FLASH_5720VENDOR_A_ST_M45PE10:
14896         case FLASH_5720VENDOR_M_ST_M25PE20:
14897         case FLASH_5720VENDOR_M_ST_M45PE20:
14898         case FLASH_5720VENDOR_A_ST_M25PE20:
14899         case FLASH_5720VENDOR_A_ST_M45PE20:
14900         case FLASH_5720VENDOR_M_ST_M25PE40:
14901         case FLASH_5720VENDOR_M_ST_M45PE40:
14902         case FLASH_5720VENDOR_A_ST_M25PE40:
14903         case FLASH_5720VENDOR_A_ST_M45PE40:
14904         case FLASH_5720VENDOR_M_ST_M25PE80:
14905         case FLASH_5720VENDOR_M_ST_M45PE80:
14906         case FLASH_5720VENDOR_A_ST_M25PE80:
14907         case FLASH_5720VENDOR_A_ST_M45PE80:
14908         case FLASH_5720VENDOR_ST_25USPT:
14909         case FLASH_5720VENDOR_ST_45USPT:
14910                 tp->nvram_jedecnum = JEDEC_ST;
14911                 tg3_flag_set(tp, NVRAM_BUFFERED);
14912                 tg3_flag_set(tp, FLASH);
14913
14914                 switch (nvmpinstrp) {
14915                 case FLASH_5720VENDOR_M_ST_M25PE20:
14916                 case FLASH_5720VENDOR_M_ST_M45PE20:
14917                 case FLASH_5720VENDOR_A_ST_M25PE20:
14918                 case FLASH_5720VENDOR_A_ST_M45PE20:
14919                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14920                         break;
14921                 case FLASH_5720VENDOR_M_ST_M25PE40:
14922                 case FLASH_5720VENDOR_M_ST_M45PE40:
14923                 case FLASH_5720VENDOR_A_ST_M25PE40:
14924                 case FLASH_5720VENDOR_A_ST_M45PE40:
14925                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14926                         break;
14927                 case FLASH_5720VENDOR_M_ST_M25PE80:
14928                 case FLASH_5720VENDOR_M_ST_M45PE80:
14929                 case FLASH_5720VENDOR_A_ST_M25PE80:
14930                 case FLASH_5720VENDOR_A_ST_M45PE80:
14931                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14932                         break;
14933                 default:
14934                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14935                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14936                         break;
14937                 }
14938                 break;
14939         default:
14940                 tg3_flag_set(tp, NO_NVRAM);
14941                 return;
14942         }
14943
14944         tg3_nvram_get_pagesize(tp, nvcfg1);
14945         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14946                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14947
14948         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14949                 u32 val;
14950
14951                 if (tg3_nvram_read(tp, 0, &val))
14952                         return;
14953
14954                 if (val != TG3_EEPROM_MAGIC &&
14955                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14956                         tg3_flag_set(tp, NO_NVRAM);
14957         }
14958 }
14959
14960 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14961 static void tg3_nvram_init(struct tg3 *tp)
14962 {
14963         if (tg3_flag(tp, IS_SSB_CORE)) {
14964                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14965                 tg3_flag_clear(tp, NVRAM);
14966                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14967                 tg3_flag_set(tp, NO_NVRAM);
14968                 return;
14969         }
14970
14971         tw32_f(GRC_EEPROM_ADDR,
14972              (EEPROM_ADDR_FSM_RESET |
14973               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14974                EEPROM_ADDR_CLKPERD_SHIFT)));
14975
14976         msleep(1);
14977
14978         /* Enable serial EEPROM (seeprom) accesses. */
14979         tw32_f(GRC_LOCAL_CTRL,
14980              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14981         udelay(100);
14982
14983         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14984             tg3_asic_rev(tp) != ASIC_REV_5701) {
14985                 tg3_flag_set(tp, NVRAM);
14986
14987                 if (tg3_nvram_lock(tp)) {
14988                         netdev_warn(tp->dev,
14989                                     "Cannot get nvram lock, %s failed\n",
14990                                     __func__);
14991                         return;
14992                 }
14993                 tg3_enable_nvram_access(tp);
14994
14995                 tp->nvram_size = 0;
14996
14997                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14998                         tg3_get_5752_nvram_info(tp);
14999                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15000                         tg3_get_5755_nvram_info(tp);
15001                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15002                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15003                          tg3_asic_rev(tp) == ASIC_REV_5785)
15004                         tg3_get_5787_nvram_info(tp);
15005                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15006                         tg3_get_5761_nvram_info(tp);
15007                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15008                         tg3_get_5906_nvram_info(tp);
15009                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15010                          tg3_flag(tp, 57765_CLASS))
15011                         tg3_get_57780_nvram_info(tp);
15012                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15013                          tg3_asic_rev(tp) == ASIC_REV_5719)
15014                         tg3_get_5717_nvram_info(tp);
15015                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15016                          tg3_asic_rev(tp) == ASIC_REV_5762)
15017                         tg3_get_5720_nvram_info(tp);
15018                 else
15019                         tg3_get_nvram_info(tp);
15020
15021                 if (tp->nvram_size == 0)
15022                         tg3_get_nvram_size(tp);
15023
15024                 tg3_disable_nvram_access(tp);
15025                 tg3_nvram_unlock(tp);
15026
15027         } else {
15028                 tg3_flag_clear(tp, NVRAM);
15029                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15030
15031                 tg3_get_eeprom_size(tp);
15032         }
15033 }
15034
15035 struct subsys_tbl_ent {
15036         u16 subsys_vendor, subsys_devid;
15037         u32 phy_id;
15038 };
15039
15040 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15041         /* Broadcom boards. */
15042         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15043           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15044         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15045           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15046         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15047           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15048         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15049           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15050         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15051           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15052         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15053           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15054         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15055           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15056         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15057           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15058         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15059           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15060         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15061           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15062         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15063           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15064
15065         /* 3com boards. */
15066         { TG3PCI_SUBVENDOR_ID_3COM,
15067           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15068         { TG3PCI_SUBVENDOR_ID_3COM,
15069           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15070         { TG3PCI_SUBVENDOR_ID_3COM,
15071           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15072         { TG3PCI_SUBVENDOR_ID_3COM,
15073           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15074         { TG3PCI_SUBVENDOR_ID_3COM,
15075           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15076
15077         /* DELL boards. */
15078         { TG3PCI_SUBVENDOR_ID_DELL,
15079           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15080         { TG3PCI_SUBVENDOR_ID_DELL,
15081           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15082         { TG3PCI_SUBVENDOR_ID_DELL,
15083           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15084         { TG3PCI_SUBVENDOR_ID_DELL,
15085           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15086
15087         /* Compaq boards. */
15088         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15089           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15090         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15091           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15092         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15093           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15094         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15095           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15096         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15097           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15098
15099         /* IBM boards. */
15100         { TG3PCI_SUBVENDOR_ID_IBM,
15101           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15102 };
15103
15104 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15105 {
15106         int i;
15107
15108         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15109                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15110                      tp->pdev->subsystem_vendor) &&
15111                     (subsys_id_to_phy_id[i].subsys_devid ==
15112                      tp->pdev->subsystem_device))
15113                         return &subsys_id_to_phy_id[i];
15114         }
15115         return NULL;
15116 }
15117
15118 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15119 {
15120         u32 val;
15121
15122         tp->phy_id = TG3_PHY_ID_INVALID;
15123         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15124
15125         /* Assume an onboard, WOL-capable device by default. */
15126         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15127         tg3_flag_set(tp, WOL_CAP);
15128
15129         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15130                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15131                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15132                         tg3_flag_set(tp, IS_NIC);
15133                 }
15134                 val = tr32(VCPU_CFGSHDW);
15135                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15136                         tg3_flag_set(tp, ASPM_WORKAROUND);
15137                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15138                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15139                         tg3_flag_set(tp, WOL_ENABLE);
15140                         device_set_wakeup_enable(&tp->pdev->dev, true);
15141                 }
15142                 goto done;
15143         }
15144
15145         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15146         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15147                 u32 nic_cfg, led_cfg;
15148                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15149                 u32 nic_phy_id, ver, eeprom_phy_id;
15150                 int eeprom_phy_serdes = 0;
15151
15152                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15153                 tp->nic_sram_data_cfg = nic_cfg;
15154
15155                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15156                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15157                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15158                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15159                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15160                     (ver > 0) && (ver < 0x100))
15161                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15162
15163                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15164                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15165
15166                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15167                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15168                     tg3_asic_rev(tp) == ASIC_REV_5720)
15169                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15170
15171                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15172                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15173                         eeprom_phy_serdes = 1;
15174
15175                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15176                 if (nic_phy_id != 0) {
15177                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15178                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15179
15180                         eeprom_phy_id  = (id1 >> 16) << 10;
15181                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15182                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15183                 } else
15184                         eeprom_phy_id = 0;
15185
15186                 tp->phy_id = eeprom_phy_id;
15187                 if (eeprom_phy_serdes) {
15188                         if (!tg3_flag(tp, 5705_PLUS))
15189                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15190                         else
15191                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15192                 }
15193
15194                 if (tg3_flag(tp, 5750_PLUS))
15195                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15196                                     SHASTA_EXT_LED_MODE_MASK);
15197                 else
15198                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15199
15200                 switch (led_cfg) {
15201                 default:
15202                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15203                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15204                         break;
15205
15206                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15207                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15208                         break;
15209
15210                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15211                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15212
15213                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15214                          * read, as with some older 5700/5701 bootcode.
15215                          */
15216                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15217                             tg3_asic_rev(tp) == ASIC_REV_5701)
15218                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15219
15220                         break;
15221
15222                 case SHASTA_EXT_LED_SHARED:
15223                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15224                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15225                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15226                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15227                                                  LED_CTRL_MODE_PHY_2);
15228
15229                         if (tg3_flag(tp, 5717_PLUS) ||
15230                             tg3_asic_rev(tp) == ASIC_REV_5762)
15231                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15232                                                 LED_CTRL_BLINK_RATE_MASK;
15233
15234                         break;
15235
15236                 case SHASTA_EXT_LED_MAC:
15237                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15238                         break;
15239
15240                 case SHASTA_EXT_LED_COMBO:
15241                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15242                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15243                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15244                                                  LED_CTRL_MODE_PHY_2);
15245                         break;
15246
15247                 }
15248
15249                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15250                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15251                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15252                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15253
15254                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15255                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15256
15257                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15258                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15259                         if ((tp->pdev->subsystem_vendor ==
15260                              PCI_VENDOR_ID_ARIMA) &&
15261                             (tp->pdev->subsystem_device == 0x205a ||
15262                              tp->pdev->subsystem_device == 0x2063))
15263                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15264                 } else {
15265                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15266                         tg3_flag_set(tp, IS_NIC);
15267                 }
15268
15269                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15270                         tg3_flag_set(tp, ENABLE_ASF);
15271                         if (tg3_flag(tp, 5750_PLUS))
15272                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15273                 }
15274
15275                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15276                     tg3_flag(tp, 5750_PLUS))
15277                         tg3_flag_set(tp, ENABLE_APE);
15278
15279                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15280                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15281                         tg3_flag_clear(tp, WOL_CAP);
15282
15283                 if (tg3_flag(tp, WOL_CAP) &&
15284                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15285                         tg3_flag_set(tp, WOL_ENABLE);
15286                         device_set_wakeup_enable(&tp->pdev->dev, true);
15287                 }
15288
15289                 if (cfg2 & (1 << 17))
15290                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15291
15292                 /* SerDes signal pre-emphasis in register 0x590 is set
15293                  * by the bootcode if bit 18 is set. */
15294                 if (cfg2 & (1 << 18))
15295                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15296
15297                 if ((tg3_flag(tp, 57765_PLUS) ||
15298                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15299                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15300                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15301                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15302
15303                 if (tg3_flag(tp, PCI_EXPRESS)) {
15304                         u32 cfg3;
15305
15306                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15307                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15308                             !tg3_flag(tp, 57765_PLUS) &&
15309                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15310                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15311                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15312                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15313                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15314                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15315                 }
15316
15317                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15318                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15319                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15320                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15321                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15322                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15323
15324                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15325                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15326         }
15327 done:
15328         if (tg3_flag(tp, WOL_CAP))
15329                 device_set_wakeup_enable(&tp->pdev->dev,
15330                                          tg3_flag(tp, WOL_ENABLE));
15331         else
15332                 device_set_wakeup_capable(&tp->pdev->dev, false);
15333 }
15334
15335 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15336 {
15337         int i, err;
15338         u32 val2, off = offset * 8;
15339
15340         err = tg3_nvram_lock(tp);
15341         if (err)
15342                 return err;
15343
15344         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15345         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15346                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15347         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15348         udelay(10);
15349
15350         for (i = 0; i < 100; i++) {
15351                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15352                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15353                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15354                         break;
15355                 }
15356                 udelay(10);
15357         }
15358
15359         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15360
15361         tg3_nvram_unlock(tp);
15362         if (val2 & APE_OTP_STATUS_CMD_DONE)
15363                 return 0;
15364
15365         return -EBUSY;
15366 }
15367
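/* Kick off an OTP controller command and poll OTP_STATUS for
 * completion.  Returns 0 on success or -EBUSY on timeout.
 */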
15368 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15369 {
15370         int i;
15371         u32 val;
15372
15373         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15374         tw32(OTP_CTRL, cmd);
15375
15376         /* Wait for up to 1 ms for command to execute. */
15377         for (i = 0; i < 100; i++) {
15378                 val = tr32(OTP_STATUS);
15379                 if (val & OTP_STATUS_CMD_DONE)
15380                         break;
15381                 udelay(10);
15382         }
15383
15384         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15385 }
15386
15387 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15388  * configuration is a 32-bit value that straddles the alignment boundary.
15389  * We do two 32-bit reads and then shift and merge the results.
15390  */
15391 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15392 {
15393         u32 bhalf_otp, thalf_otp;
15394
15395         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15396
15397         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15398                 return 0;
15399
15400         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15401
15402         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15403                 return 0;
15404
15405         thalf_otp = tr32(OTP_READ_DATA);
15406
15407         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15408
15409         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15410                 return 0;
15411
15412         bhalf_otp = tr32(OTP_READ_DATA);
15413
15414         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15415 }
15416
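/* Set the default link configuration: advertise every mode the PHY
 * supports (gigabit modes unless the PHY is 10/100-only, 10/100 TP
 * modes unless it is a SerDes PHY) and default to autonegotiation
 * with speed and duplex unknown until a link is negotiated.
 */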
15417 static void tg3_phy_init_link_config(struct tg3 *tp)
15418 {
15419         u32 adv = ADVERTISED_Autoneg;
15420
15421         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15422                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15423                         adv |= ADVERTISED_1000baseT_Half;
15424                 adv |= ADVERTISED_1000baseT_Full;
15425         }
15426
15427         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15428                 adv |= ADVERTISED_100baseT_Half |
15429                        ADVERTISED_100baseT_Full |
15430                        ADVERTISED_10baseT_Half |
15431                        ADVERTISED_10baseT_Full |
15432                        ADVERTISED_TP;
15433         else
15434                 adv |= ADVERTISED_FIBRE;
15435
15436         tp->link_config.advertising = adv;
15437         tp->link_config.speed = SPEED_UNKNOWN;
15438         tp->link_config.duplex = DUPLEX_UNKNOWN;
15439         tp->link_config.autoneg = AUTONEG_ENABLE;
15440         tp->link_config.active_speed = SPEED_UNKNOWN;
15441         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15442
15443         tp->old_link = -1;
15444 }
15445
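/* Probe the PHY: pick the APE PHY lock for this PCI function, read
 * and validate the PHY ID (unless ASF/APE firmware owns the PHY),
 * fall back to the subsystem-ID table when no usable ID is found,
 * flag EEE-capable chips, and restart autonegotiation when no
 * firmware agent is managing the device.
 */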
15446 static int tg3_phy_probe(struct tg3 *tp)
15447 {
15448         u32 hw_phy_id_1, hw_phy_id_2;
15449         u32 hw_phy_id, hw_phy_id_masked;
15450         int err;
15451
15452         /* flow control autonegotiation is default behavior */
15453         tg3_flag_set(tp, PAUSE_AUTONEG);
15454         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15455
15456         if (tg3_flag(tp, ENABLE_APE)) {
15457                 switch (tp->pci_fn) {
15458                 case 0:
15459                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15460                         break;
15461                 case 1:
15462                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15463                         break;
15464                 case 2:
15465                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15466                         break;
15467                 case 3:
15468                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15469                         break;
15470                 }
15471         }
15472
15473         if (!tg3_flag(tp, ENABLE_ASF) &&
15474             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15475             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15476                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15477                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15478
15479         if (tg3_flag(tp, USE_PHYLIB))
15480                 return tg3_phy_init(tp);
15481
15482         /* Reading the PHY ID register can conflict with ASF
15483          * firmware access to the PHY hardware.
15484          */
15485         err = 0;
15486         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15487                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15488         } else {
15489                 /* Now read the physical PHY_ID from the chip and verify
15490                  * that it is sane.  If it doesn't look good, we fall back
15491                  * to either the hard-coded table based PHY_ID and failing
15492                  * to either the hard-coded table based PHY_ID or, failing
15493                  * that, the value found in the eeprom area.
15494                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15495                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15496
15497                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15498                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15499                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15500
15501                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15502         }
15503
15504         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15505                 tp->phy_id = hw_phy_id;
15506                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15507                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15508                 else
15509                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15510         } else {
15511                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15512                         /* Do nothing, phy ID already set up in
15513                          * tg3_get_eeprom_hw_cfg().
15514                          */
15515                 } else {
15516                         struct subsys_tbl_ent *p;
15517
15518                         /* No eeprom signature?  Try the hardcoded
15519                          * subsys device table.
15520                          */
15521                         p = tg3_lookup_by_subsys(tp);
15522                         if (p) {
15523                                 tp->phy_id = p->phy_id;
15524                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15525                                 /* So far we have seen the IDs 0xbc050cd0,
15526                                  * 0xbc050f80 and 0xbc050c30 on devices
15527                                  * connected to a BCM4785, and there are
15528                                  * probably more.  For now, just assume
15529                                  * that the phy is supported when it is
15530                                  * connected to an SSB core.
15531                                  */
15532                                 return -ENODEV;
15533                         }
15534
15535                         if (!tp->phy_id ||
15536                             tp->phy_id == TG3_PHY_ID_BCM8002)
15537                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15538                 }
15539         }
15540
15541         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15542             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15543              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15544              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15545              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15546              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15547               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15548              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15549               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15550                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15551
15552                 tp->eee.supported = SUPPORTED_100baseT_Full |
15553                                     SUPPORTED_1000baseT_Full;
15554                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15555                                      ADVERTISED_1000baseT_Full;
15556                 tp->eee.eee_enabled = 1;
15557                 tp->eee.tx_lpi_enabled = 1;
15558                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15559         }
15560
15561         tg3_phy_init_link_config(tp);
15562
15563         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15564             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15565             !tg3_flag(tp, ENABLE_APE) &&
15566             !tg3_flag(tp, ENABLE_ASF)) {
15567                 u32 bmsr, dummy;
15568
15569                 tg3_readphy(tp, MII_BMSR, &bmsr);
15570                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15571                     (bmsr & BMSR_LSTATUS))
15572                         goto skip_phy_reset;
15573
15574                 err = tg3_phy_reset(tp);
15575                 if (err)
15576                         return err;
15577
15578                 tg3_phy_set_wirespeed(tp);
15579
15580                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15581                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15582                                             tp->link_config.flowctrl);
15583
15584                         tg3_writephy(tp, MII_BMCR,
15585                                      BMCR_ANENABLE | BMCR_ANRESTART);
15586                 }
15587         }
15588
15589 skip_phy_reset:
15590         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15591                 err = tg3_init_5401phy_dsp(tp);
15592                 if (err)
15593                         return err;
15594
15595                 err = tg3_init_5401phy_dsp(tp);
15596         }
15597
15598         return err;
15599 }
15600
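/* Parse the PCI VPD read-only section to extract the board part
 * number, plus the firmware version on boards whose MFR_ID keyword is
 * "1028".  When no usable VPD exists, derive a part number string
 * from the PCI device ID instead.
 */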
15601 static void tg3_read_vpd(struct tg3 *tp)
15602 {
15603         u8 *vpd_data;
15604         unsigned int block_end, rosize, len;
15605         u32 vpdlen;
15606         int j, i = 0;
15607
15608         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15609         if (!vpd_data)
15610                 goto out_no_vpd;
15611
15612         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15613         if (i < 0)
15614                 goto out_not_found;
15615
15616         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15617         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15618         i += PCI_VPD_LRDT_TAG_SIZE;
15619
15620         if (block_end > vpdlen)
15621                 goto out_not_found;
15622
15623         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15624                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15625         if (j > 0) {
15626                 len = pci_vpd_info_field_size(&vpd_data[j]);
15627
15628                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15629                 if (j + len > block_end || len != 4 ||
15630                     memcmp(&vpd_data[j], "1028", 4))
15631                         goto partno;
15632
15633                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15634                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15635                 if (j < 0)
15636                         goto partno;
15637
15638                 len = pci_vpd_info_field_size(&vpd_data[j]);
15639
15640                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15641                 if (j + len > block_end)
15642                         goto partno;
15643
15644                 if (len >= sizeof(tp->fw_ver))
15645                         len = sizeof(tp->fw_ver) - 1;
15646                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15647                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15648                          &vpd_data[j]);
15649         }
15650
15651 partno:
15652         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15653                                       PCI_VPD_RO_KEYWORD_PARTNO);
15654         if (i < 0)
15655                 goto out_not_found;
15656
15657         len = pci_vpd_info_field_size(&vpd_data[i]);
15658
15659         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15660         if (len > TG3_BPN_SIZE ||
15661             (len + i) > vpdlen)
15662                 goto out_not_found;
15663
15664         memcpy(tp->board_part_number, &vpd_data[i], len);
15665
15666 out_not_found:
15667         kfree(vpd_data);
15668         if (tp->board_part_number[0])
15669                 return;
15670
15671 out_no_vpd:
15672         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15673                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15674                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15675                         strcpy(tp->board_part_number, "BCM5717");
15676                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15677                         strcpy(tp->board_part_number, "BCM5718");
15678                 else
15679                         goto nomatch;
15680         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15681                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15682                         strcpy(tp->board_part_number, "BCM57780");
15683                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15684                         strcpy(tp->board_part_number, "BCM57760");
15685                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15686                         strcpy(tp->board_part_number, "BCM57790");
15687                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15688                         strcpy(tp->board_part_number, "BCM57788");
15689                 else
15690                         goto nomatch;
15691         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15692                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15693                         strcpy(tp->board_part_number, "BCM57761");
15694                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15695                         strcpy(tp->board_part_number, "BCM57765");
15696                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15697                         strcpy(tp->board_part_number, "BCM57781");
15698                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15699                         strcpy(tp->board_part_number, "BCM57785");
15700                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15701                         strcpy(tp->board_part_number, "BCM57791");
15702                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15703                         strcpy(tp->board_part_number, "BCM57795");
15704                 else
15705                         goto nomatch;
15706         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15707                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15708                         strcpy(tp->board_part_number, "BCM57762");
15709                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15710                         strcpy(tp->board_part_number, "BCM57766");
15711                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15712                         strcpy(tp->board_part_number, "BCM57782");
15713                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15714                         strcpy(tp->board_part_number, "BCM57786");
15715                 else
15716                         goto nomatch;
15717         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15718                 strcpy(tp->board_part_number, "BCM95906");
15719         } else {
15720 nomatch:
15721                 strcpy(tp->board_part_number, "none");
15722         }
15723 }
15724
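/* Returns 1 if the NVRAM firmware image header at @offset has the
 * expected magic in its first word and a zero second word.
 */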
15725 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15726 {
15727         u32 val;
15728
15729         if (tg3_nvram_read(tp, offset, &val) ||
15730             (val & 0xfc000000) != 0x0c000000 ||
15731             tg3_nvram_read(tp, offset + 4, &val) ||
15732             val != 0)
15733                 return 0;
15734
15735         return 1;
15736 }
15737
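/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte version string at a pointer stored in the image header;
 * older images encode a major/minor pair in the NVRAM directory.
 */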
15738 static void tg3_read_bc_ver(struct tg3 *tp)
15739 {
15740         u32 val, offset, start, ver_offset;
15741         int i, dst_off;
15742         bool newver = false;
15743
15744         if (tg3_nvram_read(tp, 0xc, &offset) ||
15745             tg3_nvram_read(tp, 0x4, &start))
15746                 return;
15747
15748         offset = tg3_nvram_logical_addr(tp, offset);
15749
15750         if (tg3_nvram_read(tp, offset, &val))
15751                 return;
15752
15753         if ((val & 0xfc000000) == 0x0c000000) {
15754                 if (tg3_nvram_read(tp, offset + 4, &val))
15755                         return;
15756
15757                 if (val == 0)
15758                         newver = true;
15759         }
15760
15761         dst_off = strlen(tp->fw_ver);
15762
15763         if (newver) {
15764                 if (TG3_VER_SIZE - dst_off < 16 ||
15765                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15766                         return;
15767
15768                 offset = offset + ver_offset - start;
15769                 for (i = 0; i < 16; i += 4) {
15770                         __be32 v;
15771                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15772                                 return;
15773
15774                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15775                 }
15776         } else {
15777                 u32 major, minor;
15778
15779                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15780                         return;
15781
15782                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15783                         TG3_NVM_BCVER_MAJSFT;
15784                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15785                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15786                          "v%d.%02d", major, minor);
15787         }
15788 }
15789
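/* Format the hardware selfboot config version ("sb vX.YY") into
 * tp->fw_ver.
 */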
15790 static void tg3_read_hwsb_ver(struct tg3 *tp)
15791 {
15792         u32 val, major, minor;
15793
15794         /* Use native endian representation */
15795         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15796                 return;
15797
15798         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15799                 TG3_NVM_HWSB_CFG1_MAJSFT;
15800         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15801                 TG3_NVM_HWSB_CFG1_MINSFT;
15802
15803         snprintf(tp->fw_ver, sizeof(tp->fw_ver), "sb v%d.%02d", major, minor);
15804 }
15805
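/* Append the selfboot format-1 image version, plus a build letter if
 * the image carries a nonzero build number, to tp->fw_ver.
 */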
15806 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15807 {
15808         u32 offset, major, minor, build;
15809
15810         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15811
15812         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15813                 return;
15814
15815         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15816         case TG3_EEPROM_SB_REVISION_0:
15817                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15818                 break;
15819         case TG3_EEPROM_SB_REVISION_2:
15820                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15821                 break;
15822         case TG3_EEPROM_SB_REVISION_3:
15823                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15824                 break;
15825         case TG3_EEPROM_SB_REVISION_4:
15826                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15827                 break;
15828         case TG3_EEPROM_SB_REVISION_5:
15829                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15830                 break;
15831         case TG3_EEPROM_SB_REVISION_6:
15832                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15833                 break;
15834         default:
15835                 return;
15836         }
15837
15838         if (tg3_nvram_read(tp, offset, &val))
15839                 return;
15840
15841         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15842                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15843         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15844                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15845         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15846
15847         if (minor > 99 || build > 26)
15848                 return;
15849
15850         offset = strlen(tp->fw_ver);
15851         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15852                  " v%d.%02d", major, minor);
15853
15854         if (build > 0) {
15855                 offset = strlen(tp->fw_ver);
15856                 if (offset < TG3_VER_SIZE - 1)
15857                         tp->fw_ver[offset] = 'a' + build - 1;
15858         }
15859 }
15860
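/* Scan the NVRAM directory for the ASF initialization entry and, if
 * the firmware image there looks valid, append its version string to
 * tp->fw_ver.
 */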
15861 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15862 {
15863         u32 val, offset, start;
15864         int i, vlen;
15865
15866         for (offset = TG3_NVM_DIR_START;
15867              offset < TG3_NVM_DIR_END;
15868              offset += TG3_NVM_DIRENT_SIZE) {
15869                 if (tg3_nvram_read(tp, offset, &val))
15870                         return;
15871
15872                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15873                         break;
15874         }
15875
15876         if (offset == TG3_NVM_DIR_END)
15877                 return;
15878
15879         if (!tg3_flag(tp, 5705_PLUS))
15880                 start = 0x08000000;
15881         else if (tg3_nvram_read(tp, offset - 4, &start))
15882                 return;
15883
15884         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15885             !tg3_fw_img_is_valid(tp, offset) ||
15886             tg3_nvram_read(tp, offset + 8, &val))
15887                 return;
15888
15889         offset += val - start;
15890
15891         vlen = strlen(tp->fw_ver);
15892
15893         tp->fw_ver[vlen++] = ',';
15894         tp->fw_ver[vlen++] = ' ';
15895
15896         for (i = 0; i < 4; i++) {
15897                 __be32 v;
15898                 if (tg3_nvram_read_be32(tp, offset, &v))
15899                         return;
15900
15901                 offset += sizeof(v);
15902
15903                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15904                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15905                         break;
15906                 }
15907
15908                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15909                 vlen += sizeof(v);
15910         }
15911 }
15912
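/* Set APE_HAS_NCSI if APE firmware is present, ready, and advertises
 * the NCSI feature.
 */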
15913 static void tg3_probe_ncsi(struct tg3 *tp)
15914 {
15915         u32 apedata;
15916
15917         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15918         if (apedata != APE_SEG_SIG_MAGIC)
15919                 return;
15920
15921         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15922         if (!(apedata & APE_FW_STATUS_READY))
15923                 return;
15924
15925         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15926                 tg3_flag_set(tp, APE_HAS_NCSI);
15927 }
15928
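/* Append the APE firmware (NCSI, SMASH, or DASH) version to
 * tp->fw_ver.
 */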
15929 static void tg3_read_dash_ver(struct tg3 *tp)
15930 {
15931         int vlen;
15932         u32 apedata;
15933         char *fwtype;
15934
15935         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15936
15937         if (tg3_flag(tp, APE_HAS_NCSI))
15938                 fwtype = "NCSI";
15939         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15940                 fwtype = "SMASH";
15941         else
15942                 fwtype = "DASH";
15943
15944         vlen = strlen(tp->fw_ver);
15945
15946         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15947                  fwtype,
15948                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15949                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15950                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15951                  (apedata & APE_FW_VERSION_BLDMSK));
15952 }
15953
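/* On 5762 devices, read the version bytes from the OTP area and
 * append them to tp->fw_ver.
 */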
15954 static void tg3_read_otp_ver(struct tg3 *tp)
15955 {
15956         u32 val, val2;
15957
15958         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15959                 return;
15960
15961         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15962             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15963             TG3_OTP_MAGIC0_VALID(val)) {
15964                 u64 val64 = (u64) val << 32 | val2;
15965                 u32 ver = 0;
15966                 int i, vlen;
15967
15968                 for (i = 0; i < 7; i++) {
15969                         if ((val64 & 0xff) == 0)
15970                                 break;
15971                         ver = val64 & 0xff;
15972                         val64 >>= 8;
15973                 }
15974                 vlen = strlen(tp->fw_ver);
15975                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15976         }
15977 }
15978
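/* Build the complete tp->fw_ver string.  The NVRAM signature word
 * selects between bootcode, selfboot, and hardware-selfboot version
 * formats; ASF/APE management firmware versions are appended when
 * present.
 */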
15979 static void tg3_read_fw_ver(struct tg3 *tp)
15980 {
15981         u32 val;
15982         bool vpd_vers = false;
15983
15984         if (tp->fw_ver[0] != 0)
15985                 vpd_vers = true;
15986
15987         if (tg3_flag(tp, NO_NVRAM)) {
15988                 strcat(tp->fw_ver, "sb");
15989                 tg3_read_otp_ver(tp);
15990                 return;
15991         }
15992
15993         if (tg3_nvram_read(tp, 0, &val))
15994                 return;
15995
15996         if (val == TG3_EEPROM_MAGIC)
15997                 tg3_read_bc_ver(tp);
15998         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15999                 tg3_read_sb_ver(tp, val);
16000         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16001                 tg3_read_hwsb_ver(tp);
16002
16003         if (tg3_flag(tp, ENABLE_ASF)) {
16004                 if (tg3_flag(tp, ENABLE_APE)) {
16005                         tg3_probe_ncsi(tp);
16006                         if (!vpd_vers)
16007                                 tg3_read_dash_ver(tp);
16008                 } else if (!vpd_vers) {
16009                         tg3_read_mgmtfw_ver(tp);
16010                 }
16011         }
16012
16013         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16014 }
16015
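/* Maximum number of entries in the RX return ring for this chip
 * family.
 */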
16016 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16017 {
16018         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16019                 return TG3_RX_RET_MAX_SIZE_5717;
16020         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16021                 return TG3_RX_RET_MAX_SIZE_5700;
16022         else
16023                 return TG3_RX_RET_MAX_SIZE_5705;
16024 }
16025
16026 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16027         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16028         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16029         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16030         { },
16031 };
16032
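/* On dual-port devices the two ports show up as separate PCI
 * functions in the same slot; find the other one.
 */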
16033 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16034 {
16035         struct pci_dev *peer;
16036         unsigned int func, devnr = tp->pdev->devfn & ~7;
16037
16038         for (func = 0; func < 8; func++) {
16039                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16040                 if (peer && peer != tp->pdev)
16041                         break;
16042                 pci_dev_put(peer);
16043         }
16044         /* The 5704 can be configured in single-port mode; set peer to
16045          * tp->pdev in that case.
16046          */
16047         if (!peer) {
16048                 peer = tp->pdev;
16049                 return peer;
16050         }
16051
16052         /*
16053          * We don't need to keep the refcount elevated; there's no way
16054          * to remove one half of this device without removing the other.
16055          */
16056         pci_dev_put(peer);
16057
16058         return peer;
16059 }
16060
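/* Determine the chip revision ID, consulting the product ID register
 * on devices that use the alternate ASIC REV location, then set the
 * cumulative ASIC-generation flags (5705_PLUS, 5750_PLUS, 5755_PLUS,
 * 57765_PLUS, etc.) that later code keys off of.
 */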
16061 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16062 {
16063         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16064         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16065                 u32 reg;
16066
16067                 /* All devices that use the alternate
16068                  * ASIC REV location have a CPMU.
16069                  */
16070                 tg3_flag_set(tp, CPMU_PRESENT);
16071
16072                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16073                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16074                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16075                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16076                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16077                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16078                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16079                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16080                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16081                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16082                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16083                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16084                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16085                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16086                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16087                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16088                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16089                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16090                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16091                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16092                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16093                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16094                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16095                 else
16096                         reg = TG3PCI_PRODID_ASICREV;
16097
16098                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16099         }
16100
16101         /* Wrong chip ID in 5752 A0. This code can be removed later
16102          * as A0 is not in production.
16103          */
16104         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16105                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16106
16107         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16108                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16109
16110         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16111             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16112             tg3_asic_rev(tp) == ASIC_REV_5720)
16113                 tg3_flag_set(tp, 5717_PLUS);
16114
16115         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16116             tg3_asic_rev(tp) == ASIC_REV_57766)
16117                 tg3_flag_set(tp, 57765_CLASS);
16118
16119         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16120              tg3_asic_rev(tp) == ASIC_REV_5762)
16121                 tg3_flag_set(tp, 57765_PLUS);
16122
16123         /* Intentionally exclude ASIC_REV_5906 */
16124         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16125             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16126             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16127             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16128             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16129             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16130             tg3_flag(tp, 57765_PLUS))
16131                 tg3_flag_set(tp, 5755_PLUS);
16132
16133         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16134             tg3_asic_rev(tp) == ASIC_REV_5714)
16135                 tg3_flag_set(tp, 5780_CLASS);
16136
16137         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16138             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16139             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16140             tg3_flag(tp, 5755_PLUS) ||
16141             tg3_flag(tp, 5780_CLASS))
16142                 tg3_flag_set(tp, 5750_PLUS);
16143
16144         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16145             tg3_flag(tp, 5750_PLUS))
16146                 tg3_flag_set(tp, 5705_PLUS);
16147 }
16148
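/* Returns true if this device only supports 10/100 Mbps link speeds. */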
16149 static bool tg3_10_100_only_device(struct tg3 *tp,
16150                                    const struct pci_device_id *ent)
16151 {
16152         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16153
16154         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16155              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16156             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16157                 return true;
16158
16159         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16160                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16161                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16162                                 return true;
16163                 } else {
16164                         return true;
16165                 }
16166         }
16167
16168         return false;
16169 }
16170
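/* One-time discovery of chip capabilities and bug workarounds.  Reads
 * PCI config space, NVRAM, and chip registers to populate tg3_flags
 * and the register access method pointers before the device is used.
 */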
16171 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16172 {
16173         u32 misc_ctrl_reg;
16174         u32 pci_state_reg, grc_misc_cfg;
16175         u32 val;
16176         u16 pci_cmd;
16177         int err;
16178
16179         /* Force memory write invalidate off.  If we leave it on,
16180          * then on 5700_BX chips we have to enable a workaround.
16181          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16182          * to match the cacheline size.  The Broadcom driver has this
16183          * workaround but turns MWI off all the time and so never uses
16184          * it.  This seems to suggest that the workaround is insufficient.
16185          */
16186         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16187         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16188         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16189
16190         /* Important! -- Make sure register accesses are byteswapped
16191          * correctly.  Also, for those chips that require it, make
16192          * sure that indirect register accesses are enabled before
16193          * the first operation.
16194          */
16195         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16196                               &misc_ctrl_reg);
16197         tp->misc_host_ctrl |= (misc_ctrl_reg &
16198                                MISC_HOST_CTRL_CHIPREV);
16199         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16200                                tp->misc_host_ctrl);
16201
16202         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16203
16204         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16205          * we need to disable memory and use config. cycles
16206          * only to access all registers. The 5702/03 chips
16207          * can mistakenly decode the special cycles from the
16208          * ICH chipsets as memory write cycles, causing corruption
16209          * of register and memory space. Only certain ICH bridges
16210          * will drive special cycles with non-zero data during the
16211          * address phase which can fall within the 5703's address
16212          * range. This is not an ICH bug as the PCI spec allows
16213          * non-zero address during special cycles. However, only
16214          * these ICH bridges are known to drive non-zero addresses
16215          * during special cycles.
16216          *
16217          * Since special cycles do not cross PCI bridges, we only
16218          * enable this workaround if the 5703 is on the secondary
16219          * bus of these ICH bridges.
16220          */
16221         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16222             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16223                 static struct tg3_dev_id {
16224                         u32     vendor;
16225                         u32     device;
16226                         u32     rev;
16227                 } ich_chipsets[] = {
16228                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16229                           PCI_ANY_ID },
16230                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16231                           PCI_ANY_ID },
16232                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16233                           0xa },
16234                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16235                           PCI_ANY_ID },
16236                         { },
16237                 };
16238                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16239                 struct pci_dev *bridge = NULL;
16240
16241                 while (pci_id->vendor != 0) {
16242                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16243                                                 bridge);
16244                         if (!bridge) {
16245                                 pci_id++;
16246                                 continue;
16247                         }
16248                         if (pci_id->rev != PCI_ANY_ID) {
16249                                 if (bridge->revision > pci_id->rev)
16250                                         continue;
16251                         }
16252                         if (bridge->subordinate &&
16253                             (bridge->subordinate->number ==
16254                              tp->pdev->bus->number)) {
16255                                 tg3_flag_set(tp, ICH_WORKAROUND);
16256                                 pci_dev_put(bridge);
16257                                 break;
16258                         }
16259                 }
16260         }
16261
16262         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16263                 static struct tg3_dev_id {
16264                         u32     vendor;
16265                         u32     device;
16266                 } bridge_chipsets[] = {
16267                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16268                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16269                         { },
16270                 };
16271                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16272                 struct pci_dev *bridge = NULL;
16273
16274                 while (pci_id->vendor != 0) {
16275                         bridge = pci_get_device(pci_id->vendor,
16276                                                 pci_id->device,
16277                                                 bridge);
16278                         if (!bridge) {
16279                                 pci_id++;
16280                                 continue;
16281                         }
16282                         if (bridge->subordinate &&
16283                             (bridge->subordinate->number <=
16284                              tp->pdev->bus->number) &&
16285                             (bridge->subordinate->busn_res.end >=
16286                              tp->pdev->bus->number)) {
16287                                 tg3_flag_set(tp, 5701_DMA_BUG);
16288                                 pci_dev_put(bridge);
16289                                 break;
16290                         }
16291                 }
16292         }
16293
16294         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16295          * DMA addresses > 40-bit. This bridge may have additional 57xx
16296          * devices behind it in some 4-port NIC designs, for example.
16297          * Any tg3 device found behind the bridge will also need the 40-bit
16298          * DMA workaround.
16299          */
16300         if (tg3_flag(tp, 5780_CLASS)) {
16301                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16302                 tp->msi_cap = tp->pdev->msi_cap;
16303         } else {
16304                 struct pci_dev *bridge = NULL;
16305
16306                 do {
16307                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16308                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16309                                                 bridge);
16310                         if (bridge && bridge->subordinate &&
16311                             (bridge->subordinate->number <=
16312                              tp->pdev->bus->number) &&
16313                             (bridge->subordinate->busn_res.end >=
16314                              tp->pdev->bus->number)) {
16315                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16316                                 pci_dev_put(bridge);
16317                                 break;
16318                         }
16319                 } while (bridge);
16320         }
16321
16322         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16323             tg3_asic_rev(tp) == ASIC_REV_5714)
16324                 tp->pdev_peer = tg3_find_peer(tp);
16325
16326         /* Determine TSO capabilities */
16327         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16328                 ; /* Do nothing. HW bug. */
16329         else if (tg3_flag(tp, 57765_PLUS))
16330                 tg3_flag_set(tp, HW_TSO_3);
16331         else if (tg3_flag(tp, 5755_PLUS) ||
16332                  tg3_asic_rev(tp) == ASIC_REV_5906)
16333                 tg3_flag_set(tp, HW_TSO_2);
16334         else if (tg3_flag(tp, 5750_PLUS)) {
16335                 tg3_flag_set(tp, HW_TSO_1);
16336                 tg3_flag_set(tp, TSO_BUG);
16337                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16338                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16339                         tg3_flag_clear(tp, TSO_BUG);
16340         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16341                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16342                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16343                 tg3_flag_set(tp, FW_TSO);
16344                 tg3_flag_set(tp, TSO_BUG);
16345                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16346                         tp->fw_needed = FIRMWARE_TG3TSO5;
16347                 else
16348                         tp->fw_needed = FIRMWARE_TG3TSO;
16349         }
16350
16351         /* Selectively allow TSO based on operating conditions */
16352         if (tg3_flag(tp, HW_TSO_1) ||
16353             tg3_flag(tp, HW_TSO_2) ||
16354             tg3_flag(tp, HW_TSO_3) ||
16355             tg3_flag(tp, FW_TSO)) {
16356                 /* For firmware TSO, assume ASF is disabled.
16357                  * We'll disable TSO later if we discover ASF
16358                  * is enabled in tg3_get_eeprom_hw_cfg().
16359                  */
16360                 tg3_flag_set(tp, TSO_CAPABLE);
16361         } else {
16362                 tg3_flag_clear(tp, TSO_CAPABLE);
16363                 tg3_flag_clear(tp, TSO_BUG);
16364                 tp->fw_needed = NULL;
16365         }
16366
16367         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16368                 tp->fw_needed = FIRMWARE_TG3;
16369
16370         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16371                 tp->fw_needed = FIRMWARE_TG357766;
16372
16373         tp->irq_max = 1;
16374
16375         if (tg3_flag(tp, 5750_PLUS)) {
16376                 tg3_flag_set(tp, SUPPORT_MSI);
16377                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16378                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16379                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16380                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16381                      tp->pdev_peer == tp->pdev))
16382                         tg3_flag_clear(tp, SUPPORT_MSI);
16383
16384                 if (tg3_flag(tp, 5755_PLUS) ||
16385                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16386                         tg3_flag_set(tp, 1SHOT_MSI);
16387                 }
16388
16389                 if (tg3_flag(tp, 57765_PLUS)) {
16390                         tg3_flag_set(tp, SUPPORT_MSIX);
16391                         tp->irq_max = TG3_IRQ_MAX_VECS;
16392                 }
16393         }
16394
16395         tp->txq_max = 1;
16396         tp->rxq_max = 1;
16397         if (tp->irq_max > 1) {
16398                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16399                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16400
16401                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16402                     tg3_asic_rev(tp) == ASIC_REV_5720)
16403                         tp->txq_max = tp->irq_max - 1;
16404         }
16405
16406         if (tg3_flag(tp, 5755_PLUS) ||
16407             tg3_asic_rev(tp) == ASIC_REV_5906)
16408                 tg3_flag_set(tp, SHORT_DMA_BUG);
16409
16410         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16411                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16412
16413         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16414             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16415             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16416             tg3_asic_rev(tp) == ASIC_REV_5762)
16417                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16418
16419         if (tg3_flag(tp, 57765_PLUS) &&
16420             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16421                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16422
16423         if (!tg3_flag(tp, 5705_PLUS) ||
16424             tg3_flag(tp, 5780_CLASS) ||
16425             tg3_flag(tp, USE_JUMBO_BDFLAG))
16426                 tg3_flag_set(tp, JUMBO_CAPABLE);
16427
16428         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16429                               &pci_state_reg);
16430
16431         if (pci_is_pcie(tp->pdev)) {
16432                 u16 lnkctl;
16433
16434                 tg3_flag_set(tp, PCI_EXPRESS);
16435
16436                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16437                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16438                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16439                                 tg3_flag_clear(tp, HW_TSO_2);
16440                                 tg3_flag_clear(tp, TSO_CAPABLE);
16441                         }
16442                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16443                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16444                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16445                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16446                                 tg3_flag_set(tp, CLKREQ_BUG);
16447                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16448                         tg3_flag_set(tp, L1PLLPD_EN);
16449                 }
16450         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16451                 /* BCM5785 devices are effectively PCIe devices, and should
16452                  * follow PCIe codepaths, but do not have a PCIe capabilities
16453                  * section.
16454                  */
16455                 tg3_flag_set(tp, PCI_EXPRESS);
16456         } else if (!tg3_flag(tp, 5705_PLUS) ||
16457                    tg3_flag(tp, 5780_CLASS)) {
16458                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16459                 if (!tp->pcix_cap) {
16460                         dev_err(&tp->pdev->dev,
16461                                 "Cannot find PCI-X capability, aborting\n");
16462                         return -EIO;
16463                 }
16464
16465                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16466                         tg3_flag_set(tp, PCIX_MODE);
16467         }
16468
16469         /* If we have an AMD 762 or VIA K8T800 chipset, write
16470          * reordering to the mailbox registers done by the host
16471          * controller can cause major troubles.  We read back from
16472          * every mailbox register write to force the writes to be
16473          * posted to the chip in order.
16474          */
16475         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16476             !tg3_flag(tp, PCI_EXPRESS))
16477                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16478
16479         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16480                              &tp->pci_cacheline_sz);
16481         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16482                              &tp->pci_lat_timer);
16483         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16484             tp->pci_lat_timer < 64) {
16485                 tp->pci_lat_timer = 64;
16486                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16487                                       tp->pci_lat_timer);
16488         }
16489
16490         /* Important! -- It is critical that the PCI-X hw workaround
16491          * situation is decided before the first MMIO register access.
16492          */
16493         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16494                 /* 5700 BX chips need to have their TX producer index
16495                  * mailboxes written twice to workaround a bug.
16496                  */
16497                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16498
16499                 /* If we are in PCI-X mode, enable register write workaround.
16500                 /* If we are in PCI-X mode, enable the register write workaround.
16501                  * The workaround is to use indirect register accesses
16502                  * for all chip writes not to mailbox registers.
16503                  */
16504                 if (tg3_flag(tp, PCIX_MODE)) {
16505                         u32 pm_reg;
16506
16507                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16508
16509                         /* The chip can have its power management PCI config
16510                          * space registers clobbered due to this bug.
16511                          * So explicitly force the chip into D0 here.
16512                          */
16513                         pci_read_config_dword(tp->pdev,
16514                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16515                                               &pm_reg);
16516                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16517                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16518                         pci_write_config_dword(tp->pdev,
16519                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16520                                                pm_reg);
16521
16522                         /* Also, force SERR#/PERR# in PCI command. */
16523                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16524                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16525                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16526                 }
16527         }
16528
16529         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16530                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16531         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16532                 tg3_flag_set(tp, PCI_32BIT);
16533
16534         /* Chip-specific fixup from Broadcom driver */
16535         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16536             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16537                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16538                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16539         }
16540
16541         /* Default fast path register access methods */
16542         tp->read32 = tg3_read32;
16543         tp->write32 = tg3_write32;
16544         tp->read32_mbox = tg3_read32;
16545         tp->write32_mbox = tg3_write32;
16546         tp->write32_tx_mbox = tg3_write32;
16547         tp->write32_rx_mbox = tg3_write32;
16548
16549         /* Various workaround register access methods */
16550         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16551                 tp->write32 = tg3_write_indirect_reg32;
16552         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16553                  (tg3_flag(tp, PCI_EXPRESS) &&
16554                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16555                 /*
16556                  * Back-to-back register writes can cause problems on these
16557                  * chips; the workaround is to read back all reg writes
16558                  * except those to mailbox regs.
16559                  *
16560                  * See tg3_write_indirect_reg32().
16561                  */
16562                 tp->write32 = tg3_write_flush_reg32;
16563         }
16564
16565         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16566                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16567                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16568                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16569         }
16570
16571         if (tg3_flag(tp, ICH_WORKAROUND)) {
16572                 tp->read32 = tg3_read_indirect_reg32;
16573                 tp->write32 = tg3_write_indirect_reg32;
16574                 tp->read32_mbox = tg3_read_indirect_mbox;
16575                 tp->write32_mbox = tg3_write_indirect_mbox;
16576                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16577                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16578
16579                 iounmap(tp->regs);
16580                 tp->regs = NULL;
16581
16582                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16583                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16584                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16585         }
16586         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16587                 tp->read32_mbox = tg3_read32_mbox_5906;
16588                 tp->write32_mbox = tg3_write32_mbox_5906;
16589                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16590                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16591         }
16592
16593         if (tp->write32 == tg3_write_indirect_reg32 ||
16594             (tg3_flag(tp, PCIX_MODE) &&
16595              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16596               tg3_asic_rev(tp) == ASIC_REV_5701)))
16597                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16598
16599         /* The memory arbiter has to be enabled in order for SRAM accesses
16600          * to succeed.  Normally on powerup the tg3 chip firmware will make
16601          * sure it is enabled, but other entities such as system netboot
16602          * code might disable it.
16603          */
16604         val = tr32(MEMARB_MODE);
16605         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16606
16607         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16608         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16609             tg3_flag(tp, 5780_CLASS)) {
16610                 if (tg3_flag(tp, PCIX_MODE)) {
16611                         pci_read_config_dword(tp->pdev,
16612                                               tp->pcix_cap + PCI_X_STATUS,
16613                                               &val);
16614                         tp->pci_fn = val & 0x7;
16615                 }
16616         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16617                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16618                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16619                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16620                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16621                         val = tr32(TG3_CPMU_STATUS);
16622
16623                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16624                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16625                 else
16626                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16627                                      TG3_CPMU_STATUS_FSHFT_5719;
16628         }
16629
16630         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16631                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16632                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16633         }
16634
16635         /* Get eeprom hw config before calling tg3_set_power_state().
16636          * In particular, the TG3_FLAG_IS_NIC flag must be
16637          * determined before calling tg3_set_power_state() so that
16638          * we know whether or not to switch out of Vaux power.
16639          * When the flag is set, it means that GPIO1 is used for eeprom
16640          * write protect and also implies that it is a LOM where GPIOs
16641          * are not used to switch power.
16642          */
16643         tg3_get_eeprom_hw_cfg(tp);
16644
16645         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16646                 tg3_flag_clear(tp, TSO_CAPABLE);
16647                 tg3_flag_clear(tp, TSO_BUG);
16648                 tp->fw_needed = NULL;
16649         }
16650
16651         if (tg3_flag(tp, ENABLE_APE)) {
16652                 /* Allow reads and writes to the
16653                  * APE register and memory space.
16654                  */
16655                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16656                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16657                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16658                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16659                                        pci_state_reg);
16660
16661                 tg3_ape_lock_init(tp);
16662         }
16663
16664         /* Set up tp->grc_local_ctrl before calling
16665          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16666          * will bring 5700's external PHY out of reset.
16667          * It is also used as eeprom write protect on LOMs.
16668          */
16669         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16670         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16671             tg3_flag(tp, EEPROM_WRITE_PROT))
16672                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16673                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16674         /* Unused GPIO3 must be driven as output on 5752 because there
16675          * are no pull-up resistors on unused GPIO pins.
16676          */
16677         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16678                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16679
16680         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16681             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16682             tg3_flag(tp, 57765_CLASS))
16683                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16684
16685         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16686             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16687                 /* Turn off the debug UART. */
16688                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16689                 if (tg3_flag(tp, IS_NIC))
16690                         /* Keep VMain power. */
16691                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16692                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16693         }
16694
16695         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16696                 tp->grc_local_ctrl |=
16697                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16698
16699         /* Switch out of Vaux if it is a NIC */
16700         tg3_pwrsrc_switch_to_vmain(tp);
16701
16702         /* Derive initial jumbo mode from MTU assigned in
16703          * ether_setup() via the alloc_etherdev() call
16704          */
16705         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16706                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16707
16708         /* Determine WakeOnLan speed to use. */
16709         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16710             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16711             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16712             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16713                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16714         } else {
16715                 tg3_flag_set(tp, WOL_SPEED_100MB);
16716         }
16717
16718         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16719                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16720
16721         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16722         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16723             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16724              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16725              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16726             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16727             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16728                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16729
16730         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16731             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16732                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16733         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16734                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16735
16736         if (tg3_flag(tp, 5705_PLUS) &&
16737             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16738             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16739             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16740             !tg3_flag(tp, 57765_PLUS)) {
16741                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16742                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16743                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16744                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16745                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16746                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16747                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16748                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16749                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16750                 } else
16751                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16752         }
16753
16754         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16755             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16756                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16757                 if (tp->phy_otp == 0)
16758                         tp->phy_otp = TG3_OTP_DEFAULT;
16759         }
16760
16761         if (tg3_flag(tp, CPMU_PRESENT))
16762                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16763         else
16764                 tp->mi_mode = MAC_MI_MODE_BASE;
16765
16766         tp->coalesce_mode = 0;
16767         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16768             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16769                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16770
16771         /* Set these bits to enable statistics workaround. */
16772         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16773             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16774             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16775             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16776                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16777                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16778         }
16779
16780         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16781             tg3_asic_rev(tp) == ASIC_REV_57780)
16782                 tg3_flag_set(tp, USE_PHYLIB);
16783
16784         err = tg3_mdio_init(tp);
16785         if (err)
16786                 return err;
16787
16788         /* Initialize data/descriptor byte/word swapping. */
16789         val = tr32(GRC_MODE);
16790         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16791             tg3_asic_rev(tp) == ASIC_REV_5762)
16792                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16793                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16794                         GRC_MODE_B2HRX_ENABLE |
16795                         GRC_MODE_HTX2B_ENABLE |
16796                         GRC_MODE_HOST_STACKUP);
16797         else
16798                 val &= GRC_MODE_HOST_STACKUP;
16799
16800         tw32(GRC_MODE, val | tp->grc_mode);
16801
16802         tg3_switch_clocks(tp);
16803
16804         /* Clear this out for sanity. */
16805         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16806
16807         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16808         tw32(TG3PCI_REG_BASE_ADDR, 0);
16809
16810         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16811                               &pci_state_reg);
16812         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16813             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16814                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16815                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16816                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16817                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16818                         void __iomem *sram_base;
16819
16820                         /* Write some dummy words into the SRAM status block
16821                          * area and see if they read back correctly.  If the
16822                          * readback value is bad, force enable the PCIX workaround.
16823                          */
16824                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16825
16826                         writel(0x00000000, sram_base);
16827                         writel(0x00000000, sram_base + 4);
16828                         writel(0xffffffff, sram_base + 4);
16829                         if (readl(sram_base) != 0x00000000)
16830                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16831                 }
16832         }
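
        /* What the probe above relies on, spelled out: on a healthy bus
         * the final readl(sram_base) returns the 0x00000000 written to
         * offset 0, while on a part with the PCI-X target bug the posted
         * write of 0xffffffff to sram_base + 4 presumably bleeds into the
         * neighboring word, so offset 0 reads back nonzero and the
         * workaround is forced on.
         */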
16833
16834         udelay(50);
16835         tg3_nvram_init(tp);
16836
16837         /* If the device has an NVRAM, no need to load patch firmware */
16838         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16839             !tg3_flag(tp, NO_NVRAM))
16840                 tp->fw_needed = NULL;
16841
16842         grc_misc_cfg = tr32(GRC_MISC_CFG);
16843         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16844
16845         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16846             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16847              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16848                 tg3_flag_set(tp, IS_5788);
16849
16850         if (!tg3_flag(tp, IS_5788) &&
16851             tg3_asic_rev(tp) != ASIC_REV_5700)
16852                 tg3_flag_set(tp, TAGGED_STATUS);
16853         if (tg3_flag(tp, TAGGED_STATUS)) {
16854                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16855                                       HOSTCC_MODE_CLRTICK_TXBD);
16856
16857                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16858                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16859                                        tp->misc_host_ctrl);
16860         }
16861
16862         /* Preserve the APE MAC_MODE bits */
16863         if (tg3_flag(tp, ENABLE_APE))
16864                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16865         else
16866                 tp->mac_mode = 0;
16867
16868         if (tg3_10_100_only_device(tp, ent))
16869                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16870
16871         err = tg3_phy_probe(tp);
16872         if (err) {
16873                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16874                 /* ... but do not return immediately ... */
16875                 tg3_mdio_fini(tp);
16876         }
16877
16878         tg3_read_vpd(tp);
16879         tg3_read_fw_ver(tp);
16880
16881         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16882                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16883         } else {
16884                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16885                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16886                 else
16887                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16888         }
16889
16890         /* 5700 {AX,BX} chips have a broken status block link
16891          * change bit implementation, so we must use the
16892          * status register in those cases.
16893          */
16894         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16895                 tg3_flag_set(tp, USE_LINKCHG_REG);
16896         else
16897                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16898
16899         /* The led_ctrl is set during tg3_phy_probe; here we might
16900          * have to force the link status polling mechanism based
16901          * upon subsystem IDs.
16902          */
16903         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16904             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16905             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16906                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16907                 tg3_flag_set(tp, USE_LINKCHG_REG);
16908         }
16909
16910         /* For all SERDES we poll the MAC status register. */
16911         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16912                 tg3_flag_set(tp, POLL_SERDES);
16913         else
16914                 tg3_flag_clear(tp, POLL_SERDES);
16915
16916         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16917                 tg3_flag_set(tp, POLL_CPMU_LINK);
16918
16919         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16920         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16921         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16922             tg3_flag(tp, PCIX_MODE)) {
16923                 tp->rx_offset = NET_SKB_PAD;
16924 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16925                 tp->rx_copy_thresh = ~(u16)0;
16926 #endif
16927         }
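
        /* Note on the fallback above: a rx_copy_thresh of ~(u16)0 means
         * every received frame is at or below the copy threshold, so on
         * architectures without efficient unaligned access the driver
         * copies each packet into a freshly allocated, aligned buffer
         * instead of passing the unaligned DMA buffer up the stack.
         */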
16928
16929         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16930         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16931         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16932
16933         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16934
16935         /* Increment the rx prod index on the rx std ring by at most
16936          * 8 for these chips to work around hw errata.
16937          */
16938         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16939             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16940             tg3_asic_rev(tp) == ASIC_REV_5755)
16941                 tp->rx_std_max_post = 8;
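
        /* As a worked example, assuming a 512-entry standard ring, the
         * rx_std_ring_mask computed above is 0x1ff and producer indices
         * wrap as (idx + 1) & tp->rx_std_ring_mask; on the errata chips
         * just listed, at most 8 standard ring buffers are posted per
         * producer index update regardless of ring size.
         */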
16942
16943         if (tg3_flag(tp, ASPM_WORKAROUND))
16944                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16945                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16946
16947         return err;
16948 }
16949
16950 #ifdef CONFIG_SPARC
16951 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16952 {
16953         struct net_device *dev = tp->dev;
16954         struct pci_dev *pdev = tp->pdev;
16955         struct device_node *dp = pci_device_to_OF_node(pdev);
16956         const unsigned char *addr;
16957         int len;
16958
16959         addr = of_get_property(dp, "local-mac-address", &len);
16960         if (addr && len == ETH_ALEN) {
16961                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16962                 return 0;
16963         }
16964         return -ENODEV;
16965 }
16966
16967 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16968 {
16969         struct net_device *dev = tp->dev;
16970
16971         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16972         return 0;
16973 }
16974 #endif
16975
16976 static int tg3_get_device_address(struct tg3 *tp)
16977 {
16978         struct net_device *dev = tp->dev;
16979         u32 hi, lo, mac_offset;
16980         int addr_ok = 0;
16981         int err;
16982
16983 #ifdef CONFIG_SPARC
16984         if (!tg3_get_macaddr_sparc(tp))
16985                 return 0;
16986 #endif
16987
16988         if (tg3_flag(tp, IS_SSB_CORE)) {
16989                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16990                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16991                         return 0;
16992         }
16993
16994         mac_offset = 0x7c;
16995         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16996             tg3_flag(tp, 5780_CLASS)) {
16997                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16998                         mac_offset = 0xcc;
16999                 if (tg3_nvram_lock(tp))
17000                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17001                 else
17002                         tg3_nvram_unlock(tp);
17003         } else if (tg3_flag(tp, 5717_PLUS)) {
17004                 if (tp->pci_fn & 1)
17005                         mac_offset = 0xcc;
17006                 if (tp->pci_fn > 1)
17007                         mac_offset += 0x18c;
17008         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17009                 mac_offset = 0x10;
17010
17011         /* First try to get it from MAC address mailbox. */
17012         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17013         if ((hi >> 16) == 0x484b) {
17014                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17015                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17016
17017                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17018                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17019                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17020                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17021                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17022
17023                 /* Some old bootcode may report a 0 MAC address in SRAM */
17024                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17025         }
17026         if (!addr_ok) {
17027                 /* Next, try NVRAM. */
17028                 if (!tg3_flag(tp, NO_NVRAM) &&
17029                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17030                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17031                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17032                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17033                 }
17034                 /* Finally just fetch it out of the MAC control regs. */
17035                 else {
17036                         hi = tr32(MAC_ADDR_0_HIGH);
17037                         lo = tr32(MAC_ADDR_0_LOW);
17038
17039                         dev->dev_addr[5] = lo & 0xff;
17040                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17041                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17042                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17043                         dev->dev_addr[1] = hi & 0xff;
17044                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17045                 }
17046         }
17047
17048         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17049 #ifdef CONFIG_SPARC
17050                 if (!tg3_get_default_macaddr_sparc(tp))
17051                         return 0;
17052 #endif
17053                 return -EINVAL;
17054         }
17055         return 0;
17056 }
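
/* Worked example of the SRAM mailbox decode in tg3_get_device_address()
 * above, assuming the bootcode left hi = 0x484b0a1b and lo = 0x2c3d4e5f:
 * the 0x484b ("HK") signature in the upper half of hi validates the
 * entry, and the byte extraction yields the MAC address 0a:1b:2c:3d:4e:5f.
 */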
17057
17058 #define BOUNDARY_SINGLE_CACHELINE       1
17059 #define BOUNDARY_MULTI_CACHELINE        2
17060
17061 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17062 {
17063         int cacheline_size;
17064         u8 byte;
17065         int goal;
17066
17067         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17068         if (byte == 0)
17069                 cacheline_size = 1024;
17070         else
17071                 cacheline_size = (int) byte * 4;
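
        /* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
         * multiply by 4: a raw value of 0x10, for example, means a
         * 64-byte cache line.  A raw value of 0 is "unspecified" and is
         * treated as the 1024-byte worst case here.
         */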
17072
17073         /* On 5703 and later chips, the boundary bits have no
17074          * effect.
17075          */
17076         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17077             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17078             !tg3_flag(tp, PCI_EXPRESS))
17079                 goto out;
17080
17081 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17082         goal = BOUNDARY_MULTI_CACHELINE;
17083 #else
17084 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17085         goal = BOUNDARY_SINGLE_CACHELINE;
17086 #else
17087         goal = 0;
17088 #endif
17089 #endif
17090
17091         if (tg3_flag(tp, 57765_PLUS)) {
17092                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17093                 goto out;
17094         }
17095
17096         if (!goal)
17097                 goto out;
17098
17099         /* PCI controllers on most RISC systems tend to disconnect
17100          * when a device tries to burst across a cache-line boundary.
17101          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17102          *
17103          * Unfortunately, for PCI-E there are only limited
17104          * write-side controls for this, and thus for reads
17105          * we will still get the disconnects.  We'll also waste
17106          * these PCI cycles for both read and write on chips
17107          * other than 5700 and 5701, which do not implement the
17108          * boundary bits.
17109          */
17110         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17111                 switch (cacheline_size) {
17112                 case 16:
17113                 case 32:
17114                 case 64:
17115                 case 128:
17116                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17117                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17118                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17119                         } else {
17120                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17121                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17122                         }
17123                         break;
17124
17125                 case 256:
17126                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17127                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17128                         break;
17129
17130                 default:
17131                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17132                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17133                         break;
17134                 }
17135         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17136                 switch (cacheline_size) {
17137                 case 16:
17138                 case 32:
17139                 case 64:
17140                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17141                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17142                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17143                                 break;
17144                         }
17145                         /* fallthrough */
17146                 case 128:
17147                 default:
17148                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17149                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17150                         break;
17151                 }
17152         } else {
17153                 switch (cacheline_size) {
17154                 case 16:
17155                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17156                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17157                                         DMA_RWCTRL_WRITE_BNDRY_16);
17158                                 break;
17159                         }
17160                         /* fallthrough */
17161                 case 32:
17162                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17164                                         DMA_RWCTRL_WRITE_BNDRY_32);
17165                                 break;
17166                         }
17167                         /* fallthrough */
17168                 case 64:
17169                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17170                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17171                                         DMA_RWCTRL_WRITE_BNDRY_64);
17172                                 break;
17173                         }
17174                         /* fallthrough */
17175                 case 128:
17176                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17177                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17178                                         DMA_RWCTRL_WRITE_BNDRY_128);
17179                                 break;
17180                         }
17181                         /* fallthrough */
17182                 case 256:
17183                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17184                                 DMA_RWCTRL_WRITE_BNDRY_256);
17185                         break;
17186                 case 512:
17187                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17188                                 DMA_RWCTRL_WRITE_BNDRY_512);
17189                         break;
17190                 case 1024:
17191                 default:
17192                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17193                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17194                         break;
17195                 }
17196         }
17197
17198 out:
17199         return val;
17200 }
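
/* A worked example of the mapping above: on a PCI-X bus with 64-byte
 * cache lines and goal == BOUNDARY_SINGLE_CACHELINE (e.g. sparc64),
 * the 16/32/64/128 case applies and val picks up
 * DMA_RWCTRL_READ_BNDRY_128_PCIX | DMA_RWCTRL_WRITE_BNDRY_128_PCIX,
 * i.e. DMA bursts are broken at 128-byte boundaries.
 */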
17201
17202 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17203                            int size, bool to_device)
17204 {
17205         struct tg3_internal_buffer_desc test_desc;
17206         u32 sram_dma_descs;
17207         int i, ret;
17208
17209         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17210
17211         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17212         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17213         tw32(RDMAC_STATUS, 0);
17214         tw32(WDMAC_STATUS, 0);
17215
17216         tw32(BUFMGR_MODE, 0);
17217         tw32(FTQ_RESET, 0);
17218
17219         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17220         test_desc.addr_lo = buf_dma & 0xffffffff;
17221         test_desc.nic_mbuf = 0x00002100;
17222         test_desc.len = size;
17223
17224         /*
17225          * HP ZX1 systems were seeing test failures for 5701 cards running
17226          * at 33MHz the *second* time the tg3 driver was loaded after an
17227          * initial scan.
17228          *
17229          * Broadcom tells me:
17230          *   ...the DMA engine is connected to the GRC block and a DMA
17231          *   reset may affect the GRC block in some unpredictable way...
17232          *   The behavior of resets to individual blocks has not been tested.
17233          *
17234          * Broadcom noted the GRC reset will also reset all sub-components.
17235          */
17236         if (to_device) {
17237                 test_desc.cqid_sqid = (13 << 8) | 2;
17238
17239                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17240                 udelay(40);
17241         } else {
17242                 test_desc.cqid_sqid = (16 << 8) | 7;
17243
17244                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17245                 udelay(40);
17246         }
17247         test_desc.flags = 0x00000005;
17248
17249         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17250                 u32 val;
17251
17252                 val = *(((u32 *)&test_desc) + i);
17253                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17254                                        sram_dma_descs + (i * sizeof(u32)));
17255                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17256         }
17257         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17258
17259         if (to_device)
17260                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17261         else
17262                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17263
17264         ret = -ENODEV;
17265         for (i = 0; i < 40; i++) {
17266                 u32 val;
17267
17268                 if (to_device)
17269                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17270                 else
17271                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17272                 if ((val & 0xffff) == sram_dma_descs) {
17273                         ret = 0;
17274                         break;
17275                 }
17276
17277                 udelay(100);
17278         }
17279
17280         return ret;
17281 }
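
/* The completion poll at the end of tg3_do_test_dma() gives the DMA
 * engine 40 * 100us = 4ms to echo the descriptor address back through
 * the completion FIFO before the transfer is declared failed (-ENODEV).
 */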
17282
17283 #define TEST_BUFFER_SIZE        0x2000
17284
17285 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17286         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17287         { },
17288 };
17289
17290 static int tg3_test_dma(struct tg3 *tp)
17291 {
17292         dma_addr_t buf_dma;
17293         u32 *buf, saved_dma_rwctrl;
17294         int ret = 0;
17295
17296         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17297                                  &buf_dma, GFP_KERNEL);
17298         if (!buf) {
17299                 ret = -ENOMEM;
17300                 goto out_nofree;
17301         }
17302
17303         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17304                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17305
17306         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17307
17308         if (tg3_flag(tp, 57765_PLUS))
17309                 goto out;
17310
17311         if (tg3_flag(tp, PCI_EXPRESS)) {
17312                 /* DMA read watermark not used on PCIE */
17313                 tp->dma_rwctrl |= 0x00180000;
17314         } else if (!tg3_flag(tp, PCIX_MODE)) {
17315                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17316                     tg3_asic_rev(tp) == ASIC_REV_5750)
17317                         tp->dma_rwctrl |= 0x003f0000;
17318                 else
17319                         tp->dma_rwctrl |= 0x003f000f;
17320         } else {
17321                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17322                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17323                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17324                         u32 read_water = 0x7;
17325
17326                         /* If the 5704 is behind the EPB bridge, we can
17327                          * do the less restrictive ONE_DMA workaround for
17328                          * better performance.
17329                          */
17330                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17331                             tg3_asic_rev(tp) == ASIC_REV_5704)
17332                                 tp->dma_rwctrl |= 0x8000;
17333                         else if (ccval == 0x6 || ccval == 0x7)
17334                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17335
17336                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17337                                 read_water = 4;
17338                         /* Set bit 23 to enable PCIX hw bug fix */
17339                         tp->dma_rwctrl |=
17340                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17341                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17342                                 (1 << 23);
17343                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17344                         /* 5780 always in PCIX mode */
17345                         tp->dma_rwctrl |= 0x00144000;
17346                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17347                         /* 5714 always in PCIX mode */
17348                         tp->dma_rwctrl |= 0x00148000;
17349                 } else {
17350                         tp->dma_rwctrl |= 0x001b000f;
17351                 }
17352         }
17353         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17354                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17355
17356         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17357             tg3_asic_rev(tp) == ASIC_REV_5704)
17358                 tp->dma_rwctrl &= 0xfffffff0;
17359
17360         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17361             tg3_asic_rev(tp) == ASIC_REV_5701) {
17362                 /* Remove this if it causes problems for some boards. */
17363                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17364
17365                 /* On 5700/5701 chips, we need to set this bit.
17366                  * Otherwise the chip will issue cacheline transactions
17367                  * to streamable DMA memory without all of the byte
17368                  * enables turned on.  This is an error on several
17369                  * RISC PCI controllers, in particular sparc64.
17370                  *
17371                  * On 5703/5704 chips, this bit has been reassigned
17372                  * a different meaning.  In particular, it is used
17373                  * on those chips to enable a PCI-X workaround.
17374                  */
17375                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17376         }
17377
17378         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17379
17380
17381         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17382             tg3_asic_rev(tp) != ASIC_REV_5701)
17383                 goto out;
17384
17385         /* It is best to perform the DMA test with the maximum write burst size
17386          * to expose the 5700/5701 write DMA bug.
17387          */
17388         saved_dma_rwctrl = tp->dma_rwctrl;
17389         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17390         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391
17392         while (1) {
17393                 u32 *p = buf, i;
17394
17395                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17396                         p[i] = i;
17397
17398                 /* Send the buffer to the chip. */
17399                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17400                 if (ret) {
17401                         dev_err(&tp->pdev->dev,
17402                                 "%s: Buffer write failed. err = %d\n",
17403                                 __func__, ret);
17404                         break;
17405                 }
17406
17407                 /* Now read it back. */
17408                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17409                 if (ret) {
17410                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17411                                 "err = %d\n", __func__, ret);
17412                         break;
17413                 }
17414
17415                 /* Verify it. */
17416                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17417                         if (p[i] == i)
17418                                 continue;
17419
17420                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17421                             DMA_RWCTRL_WRITE_BNDRY_16) {
17422                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17423                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17424                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17425                                 break;
17426                         } else {
17427                                 dev_err(&tp->pdev->dev,
17428                                         "%s: Buffer corrupted on read back! "
17429                                         "(%d != %d)\n", __func__, p[i], i);
17430                                 ret = -ENODEV;
17431                                 goto out;
17432                         }
17433                 }
17434
17435                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17436                         /* Success. */
17437                         ret = 0;
17438                         break;
17439                 }
17440         }
17441         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17442             DMA_RWCTRL_WRITE_BNDRY_16) {
17443                 /* DMA test passed without adjusting the DMA boundary;
17444                  * now look for chipsets that are known to expose the
17445                  * DMA bug without failing the test.
17446                  */
17447                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17448                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17449                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17450                 } else {
17451                         /* Safe to use the calculated DMA boundary. */
17452                         tp->dma_rwctrl = saved_dma_rwctrl;
17453                 }
17454
17455                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17456         }
17457
17458 out:
17459         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17460 out_nofree:
17461         return ret;
17462 }
17463
17464 static void tg3_init_bufmgr_config(struct tg3 *tp)
17465 {
17466         if (tg3_flag(tp, 57765_PLUS)) {
17467                 tp->bufmgr_config.mbuf_read_dma_low_water =
17468                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17469                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17470                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17471                 tp->bufmgr_config.mbuf_high_water =
17472                         DEFAULT_MB_HIGH_WATER_57765;
17473
17474                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17475                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17476                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17477                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17478                 tp->bufmgr_config.mbuf_high_water_jumbo =
17479                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17480         } else if (tg3_flag(tp, 5705_PLUS)) {
17481                 tp->bufmgr_config.mbuf_read_dma_low_water =
17482                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17483                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17484                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17485                 tp->bufmgr_config.mbuf_high_water =
17486                         DEFAULT_MB_HIGH_WATER_5705;
17487                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17488                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17489                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17490                         tp->bufmgr_config.mbuf_high_water =
17491                                 DEFAULT_MB_HIGH_WATER_5906;
17492                 }
17493
17494                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17495                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17496                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17497                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17498                 tp->bufmgr_config.mbuf_high_water_jumbo =
17499                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17500         } else {
17501                 tp->bufmgr_config.mbuf_read_dma_low_water =
17502                         DEFAULT_MB_RDMA_LOW_WATER;
17503                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17504                         DEFAULT_MB_MACRX_LOW_WATER;
17505                 tp->bufmgr_config.mbuf_high_water =
17506                         DEFAULT_MB_HIGH_WATER;
17507
17508                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17509                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17510                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17511                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17512                 tp->bufmgr_config.mbuf_high_water_jumbo =
17513                         DEFAULT_MB_HIGH_WATER_JUMBO;
17514         }
17515
17516         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17517         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17518 }
17519
17520 static char *tg3_phy_string(struct tg3 *tp)
17521 {
17522         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17523         case TG3_PHY_ID_BCM5400:        return "5400";
17524         case TG3_PHY_ID_BCM5401:        return "5401";
17525         case TG3_PHY_ID_BCM5411:        return "5411";
17526         case TG3_PHY_ID_BCM5701:        return "5701";
17527         case TG3_PHY_ID_BCM5703:        return "5703";
17528         case TG3_PHY_ID_BCM5704:        return "5704";
17529         case TG3_PHY_ID_BCM5705:        return "5705";
17530         case TG3_PHY_ID_BCM5750:        return "5750";
17531         case TG3_PHY_ID_BCM5752:        return "5752";
17532         case TG3_PHY_ID_BCM5714:        return "5714";
17533         case TG3_PHY_ID_BCM5780:        return "5780";
17534         case TG3_PHY_ID_BCM5755:        return "5755";
17535         case TG3_PHY_ID_BCM5787:        return "5787";
17536         case TG3_PHY_ID_BCM5784:        return "5784";
17537         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17538         case TG3_PHY_ID_BCM5906:        return "5906";
17539         case TG3_PHY_ID_BCM5761:        return "5761";
17540         case TG3_PHY_ID_BCM5718C:       return "5718C";
17541         case TG3_PHY_ID_BCM5718S:       return "5718S";
17542         case TG3_PHY_ID_BCM57765:       return "57765";
17543         case TG3_PHY_ID_BCM5719C:       return "5719C";
17544         case TG3_PHY_ID_BCM5720C:       return "5720C";
17545         case TG3_PHY_ID_BCM5762:        return "5762C";
17546         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17547         case 0:                 return "serdes";
17548         default:                return "unknown";
17549         }
17550 }
17551
17552 static char *tg3_bus_string(struct tg3 *tp, char *str)
17553 {
17554         if (tg3_flag(tp, PCI_EXPRESS)) {
17555                 strcpy(str, "PCI Express");
17556                 return str;
17557         } else if (tg3_flag(tp, PCIX_MODE)) {
17558                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17559
17560                 strcpy(str, "PCIX:");
17561
17562                 if ((clock_ctrl == 7) ||
17563                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17564                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17565                         strcat(str, "133MHz");
17566                 else if (clock_ctrl == 0)
17567                         strcat(str, "33MHz");
17568                 else if (clock_ctrl == 2)
17569                         strcat(str, "50MHz");
17570                 else if (clock_ctrl == 4)
17571                         strcat(str, "66MHz");
17572                 else if (clock_ctrl == 6)
17573                         strcat(str, "100MHz");
17574         } else {
17575                 strcpy(str, "PCI:");
17576                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17577                         strcat(str, "66MHz");
17578                 else
17579                         strcat(str, "33MHz");
17580         }
17581         if (tg3_flag(tp, PCI_32BIT))
17582                 strcat(str, ":32-bit");
17583         else
17584                 strcat(str, ":64-bit");
17585         return str;
17586 }
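
/* Example output, assuming a 5704 on a 133MHz PCI-X bus with 64-bit
 * signalling: tg3_bus_string() fills str with "PCIX:133MHz:64-bit".
 * Callers pass a 40-byte scratch buffer, which fits the longest of
 * these combinations with room to spare.
 */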
17587
17588 static void tg3_init_coal(struct tg3 *tp)
17589 {
17590         struct ethtool_coalesce *ec = &tp->coal;
17591
17592         memset(ec, 0, sizeof(*ec));
17593         ec->cmd = ETHTOOL_GCOALESCE;
17594         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17595         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17596         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17597         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17598         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17599         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17600         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17601         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17602         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17603
17604         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17605                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17606                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17607                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17608                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17609                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17610         }
17611
17612         if (tg3_flag(tp, 5705_PLUS)) {
17613                 ec->rx_coalesce_usecs_irq = 0;
17614                 ec->tx_coalesce_usecs_irq = 0;
17615                 ec->stats_block_coalesce_usecs = 0;
17616         }
17617 }
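
/* These defaults should be what `ethtool -c <iface>` reports on a
 * freshly probed device; user overrides arrive later through the
 * driver's ETHTOOL_SCOALESCE handler and are range-checked there.
 */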
17618
17619 static int tg3_init_one(struct pci_dev *pdev,
17620                                   const struct pci_device_id *ent)
17621 {
17622         struct net_device *dev;
17623         struct tg3 *tp;
17624         int i, err;
17625         u32 sndmbx, rcvmbx, intmbx;
17626         char str[40];
17627         u64 dma_mask, persist_dma_mask;
17628         netdev_features_t features = 0;
17629
17630         printk_once(KERN_INFO "%s\n", version);
17631
17632         err = pci_enable_device(pdev);
17633         if (err) {
17634                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17635                 return err;
17636         }
17637
17638         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17639         if (err) {
17640                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17641                 goto err_out_disable_pdev;
17642         }
17643
17644         pci_set_master(pdev);
17645
17646         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17647         if (!dev) {
17648                 err = -ENOMEM;
17649                 goto err_out_free_res;
17650         }
17651
17652         SET_NETDEV_DEV(dev, &pdev->dev);
17653
17654         tp = netdev_priv(dev);
17655         tp->pdev = pdev;
17656         tp->dev = dev;
17657         tp->rx_mode = TG3_DEF_RX_MODE;
17658         tp->tx_mode = TG3_DEF_TX_MODE;
17659         tp->irq_sync = 1;
17660         tp->pcierr_recovery = false;
17661
17662         if (tg3_debug > 0)
17663                 tp->msg_enable = tg3_debug;
17664         else
17665                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17666
17667         if (pdev_is_ssb_gige_core(pdev)) {
17668                 tg3_flag_set(tp, IS_SSB_CORE);
17669                 if (ssb_gige_must_flush_posted_writes(pdev))
17670                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17671                 if (ssb_gige_one_dma_at_once(pdev))
17672                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17673                 if (ssb_gige_have_roboswitch(pdev)) {
17674                         tg3_flag_set(tp, USE_PHYLIB);
17675                         tg3_flag_set(tp, ROBOSWITCH);
17676                 }
17677                 if (ssb_gige_is_rgmii(pdev))
17678                         tg3_flag_set(tp, RGMII_MODE);
17679         }
17680
17681         /* The word/byte swap controls here control register access byte
17682          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17683          * setting below.
17684          */
17685         tp->misc_host_ctrl =
17686                 MISC_HOST_CTRL_MASK_PCI_INT |
17687                 MISC_HOST_CTRL_WORD_SWAP |
17688                 MISC_HOST_CTRL_INDIR_ACCESS |
17689                 MISC_HOST_CTRL_PCISTATE_RW;
17690
17691         /* The NONFRM (non-frame) byte/word swap controls take effect
17692          * on descriptor entries, anything which isn't packet data.
17693          *
17694          * The StrongARM chips on the board (one for tx, one for rx)
17695          * are running in big-endian mode.
17696          */
17697         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17698                         GRC_MODE_WSWAP_NONFRM_DATA);
17699 #ifdef __BIG_ENDIAN
17700         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17701 #endif
17702         spin_lock_init(&tp->lock);
17703         spin_lock_init(&tp->indirect_lock);
17704         INIT_WORK(&tp->reset_task, tg3_reset_task);
17705
17706         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17707         if (!tp->regs) {
17708                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17709                 err = -ENOMEM;
17710                 goto err_out_free_dev;
17711         }
17712
17713         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17714             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17715             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17716             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17717             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17719             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17720             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17728                 tg3_flag_set(tp, ENABLE_APE);
17729                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17730                 if (!tp->aperegs) {
17731                         dev_err(&pdev->dev,
17732                                 "Cannot map APE registers, aborting\n");
17733                         err = -ENOMEM;
17734                         goto err_out_iounmap;
17735                 }
17736         }
17737
17738         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17739         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17740
17741         dev->ethtool_ops = &tg3_ethtool_ops;
17742         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17743         dev->netdev_ops = &tg3_netdev_ops;
17744         dev->irq = pdev->irq;
17745
17746         err = tg3_get_invariants(tp, ent);
17747         if (err) {
17748                 dev_err(&pdev->dev,
17749                         "Problem fetching invariants of chip, aborting\n");
17750                 goto err_out_apeunmap;
17751         }
17752
17753         /* The EPB bridge inside 5714, 5715, and 5780 and any
17754          * device behind the EPB cannot support DMA addresses > 40-bit.
17755          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17756          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17757          * do DMA address check in tg3_start_xmit().
17758          */
17759         if (tg3_flag(tp, IS_5788))
17760                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17761         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17762                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17763 #ifdef CONFIG_HIGHMEM
17764                 dma_mask = DMA_BIT_MASK(64);
17765 #endif
17766         } else
17767                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17768
17769         /* Configure DMA attributes. */
17770         if (dma_mask > DMA_BIT_MASK(32)) {
17771                 err = pci_set_dma_mask(pdev, dma_mask);
17772                 if (!err) {
17773                         features |= NETIF_F_HIGHDMA;
17774                         err = pci_set_consistent_dma_mask(pdev,
17775                                                           persist_dma_mask);
17776                         if (err < 0) {
17777                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17778                                         "DMA for consistent allocations\n");
17779                                 goto err_out_apeunmap;
17780                         }
17781                 }
17782         }
17783         if (err || dma_mask == DMA_BIT_MASK(32)) {
17784                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17785                 if (err) {
17786                         dev_err(&pdev->dev,
17787                                 "No usable DMA configuration, aborting\n");
17788                         goto err_out_apeunmap;
17789                 }
17790         }
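
        /* To make the fallback above concrete: a 5780-class device
         * (40BIT_DMA_BUG) keeps persist_dma_mask at DMA_BIT_MASK(40)
         * for coherent descriptor memory, while the streaming dma_mask
         * may be widened to 64 bits under CONFIG_HIGHMEM, with
         * tg3_start_xmit() expected to catch buffers above the 40-bit
         * limit as described in the comment above.
         */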
17791
17792         tg3_init_bufmgr_config(tp);
17793
17794         /* 5700 B0 chips do not support checksumming correctly due
17795          * to hardware bugs.
17796          */
17797         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17798                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17799
17800                 if (tg3_flag(tp, 5755_PLUS))
17801                         features |= NETIF_F_IPV6_CSUM;
17802         }
17803
17804         /* TSO is on by default on chips that support hardware TSO.
17805          * Firmware TSO on older chips gives lower performance, so it
17806          * is off by default, but can be enabled using ethtool.
17807          */
17808         if ((tg3_flag(tp, HW_TSO_1) ||
17809              tg3_flag(tp, HW_TSO_2) ||
17810              tg3_flag(tp, HW_TSO_3)) &&
17811             (features & NETIF_F_IP_CSUM))
17812                 features |= NETIF_F_TSO;
17813         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17814                 if (features & NETIF_F_IPV6_CSUM)
17815                         features |= NETIF_F_TSO6;
17816                 if (tg3_flag(tp, HW_TSO_3) ||
17817                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17818                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17819                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17820                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17821                     tg3_asic_rev(tp) == ASIC_REV_57780)
17822                         features |= NETIF_F_TSO_ECN;
17823         }
17824
17825         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17826                          NETIF_F_HW_VLAN_CTAG_RX;
17827         dev->vlan_features |= features;
17828
17829         /*
17830          * Add loopback capability only for a subset of devices that support
17831          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17832          * loopback for the remaining devices.
17833          */
17834         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17835             !tg3_flag(tp, CPMU_PRESENT))
17836                 /* Add the loopback capability */
17837                 features |= NETIF_F_LOOPBACK;
17838
17839         dev->hw_features |= features;
17840         dev->priv_flags |= IFF_UNICAST_FLT;
17841
17842         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17843             !tg3_flag(tp, TSO_CAPABLE) &&
17844             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17845                 tg3_flag_set(tp, MAX_RXPEND_64);
17846                 tp->rx_pending = 63;
17847         }
17848
17849         err = tg3_get_device_address(tp);
17850         if (err) {
17851                 dev_err(&pdev->dev,
17852                         "Could not obtain valid ethernet address, aborting\n");
17853                 goto err_out_apeunmap;
17854         }
17855
17856         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17857         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17858         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17859         for (i = 0; i < tp->irq_max; i++) {
17860                 struct tg3_napi *tnapi = &tp->napi[i];
17861
17862                 tnapi->tp = tp;
17863                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17864
17865                 tnapi->int_mbox = intmbx;
17866                 if (i <= 4)
17867                         intmbx += 0x8;
17868                 else
17869                         intmbx += 0x4;
17870
17871                 tnapi->consmbox = rcvmbx;
17872                 tnapi->prodmbox = sndmbx;
17873
17874                 if (i)
17875                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17876                 else
17877                         tnapi->coal_now = HOSTCC_MODE_NOW;
17878
17879                 if (!tg3_flag(tp, SUPPORT_MSIX))
17880                         break;
17881
17882                 /*
17883                  * If we support MSIX, we'll be using RSS.  If we're using
17884                  * RSS, the first vector only handles link interrupts and the
17885                  * remaining vectors handle rx and tx interrupts.  Reuse the
17886                  * mailbox values for the next iteration.  The values we set up
17887                  * above are still useful for the single-vector mode.
17888                  */
17889                 if (!i)
17890                         continue;
17891
17892                 rcvmbx += 0x8;
17893
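                      /* The TX producer mailboxes are interleaved in register
                       * space, so advance with an alternating -0x4/+0xc walk
                       * rather than a fixed stride.
                       */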
17894                 if (sndmbx & 0x4)
17895                         sndmbx -= 0x4;
17896                 else
17897                         sndmbx += 0xc;
17898         }
17899
17900         /*
17901          * Reset the chip in case a UNDI or EFI driver did not shut it
17902          * down cleanly; the DMA self test will enable WDMAC and we'll
17903          * see (spurious) pending DMA on the PCI bus at that point.
17904          */
17905         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17906             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17907                 tg3_full_lock(tp, 0);
17908                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17909                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17910                 tg3_full_unlock(tp);
17911         }
17912
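              /* Sanity-check host DMA before registering the device; a
               * failure here means the adapter cannot operate reliably.
               */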
17913         err = tg3_test_dma(tp);
17914         if (err) {
17915                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17916                 goto err_out_apeunmap;
17917         }
17918
17919         tg3_init_coal(tp);
17920
17921         pci_set_drvdata(pdev, dev);
17922
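              /* Only the 5719/5720/5762 generation provides the hardware
               * timestamp clock needed for PTP; older parts lack it.
               */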
17923         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17924             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17925             tg3_asic_rev(tp) == ASIC_REV_5762)
17926                 tg3_flag_set(tp, PTP_CAPABLE);
17927
17928         tg3_timer_init(tp);
17929
17930         tg3_carrier_off(tp);
17931
17932         err = register_netdev(dev);
17933         if (err) {
17934                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17935                 goto err_out_apeunmap;
17936         }
17937
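              /* PTP clock registration is best effort: if it fails we keep
               * running, just without exposing a PTP clock device.
               */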
17938         if (tg3_flag(tp, PTP_CAPABLE)) {
17939                 tg3_ptp_init(tp);
17940                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17941                                                    &tp->pdev->dev);
17942                 if (IS_ERR(tp->ptp_clock))
17943                         tp->ptp_clock = NULL;
17944         }
17945
17946         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17947                     tp->board_part_number,
17948                     tg3_chip_rev_id(tp),
17949                     tg3_bus_string(tp, str),
17950                     dev->dev_addr);
17951
17952         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17953                 char *ethtype;
17954
17955                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17956                         ethtype = "10/100Base-TX";
17957                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17958                         ethtype = "1000Base-SX";
17959                 else
17960                         ethtype = "10/100/1000Base-T";
17961
17962                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17963                             "(WireSpeed[%d], EEE[%d])\n",
17964                             tg3_phy_string(tp), ethtype,
17965                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17966                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17967         }
17968
17969         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17970                     (dev->features & NETIF_F_RXCSUM) != 0,
17971                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17972                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17973                     tg3_flag(tp, ENABLE_ASF) != 0,
17974                     tg3_flag(tp, TSO_CAPABLE) != 0);
17975         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17976                     tp->dma_rwctrl,
17977                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17978                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17979
17980         pci_save_state(pdev);
17981
17982         return 0;
17983
17984 err_out_apeunmap:
17985         if (tp->aperegs) {
17986                 iounmap(tp->aperegs);
17987                 tp->aperegs = NULL;
17988         }
17989
17990 err_out_iounmap:
17991         if (tp->regs) {
17992                 iounmap(tp->regs);
17993                 tp->regs = NULL;
17994         }
17995
17996 err_out_free_dev:
17997         free_netdev(dev);
17998
17999 err_out_free_res:
18000         pci_release_regions(pdev);
18001
18002 err_out_disable_pdev:
18003         if (pci_is_enabled(pdev))
18004                 pci_disable_device(pdev);
18005         return err;
18006 }
18007
18008 static void tg3_remove_one(struct pci_dev *pdev)
18009 {
18010         struct net_device *dev = pci_get_drvdata(pdev);
18011
18012         if (dev) {
18013                 struct tg3 *tp = netdev_priv(dev);
18014
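                      /* Tear down in roughly the reverse order of probe: PTP,
                       * firmware, deferred work, PHY/MDIO, then the netdev
                       * and finally the PCI resources.
                       */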
18015                 tg3_ptp_fini(tp);
18016
18017                 release_firmware(tp->fw);
18018
18019                 tg3_reset_task_cancel(tp);
18020
18021                 if (tg3_flag(tp, USE_PHYLIB)) {
18022                         tg3_phy_fini(tp);
18023                         tg3_mdio_fini(tp);
18024                 }
18025
18026                 unregister_netdev(dev);
18027                 if (tp->aperegs) {
18028                         iounmap(tp->aperegs);
18029                         tp->aperegs = NULL;
18030                 }
18031                 if (tp->regs) {
18032                         iounmap(tp->regs);
18033                         tp->regs = NULL;
18034                 }
18035                 free_netdev(dev);
18036                 pci_release_regions(pdev);
18037                 pci_disable_device(pdev);
18038         }
18039 }
18040
18041 #ifdef CONFIG_PM_SLEEP
18042 static int tg3_suspend(struct device *device)
18043 {
18044         struct pci_dev *pdev = to_pci_dev(device);
18045         struct net_device *dev = pci_get_drvdata(pdev);
18046         struct tg3 *tp = netdev_priv(dev);
18047         int err = 0;
18048
18049         rtnl_lock();
18050
18051         if (!netif_running(dev))
18052                 goto unlock;
18053
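              /* Quiesce everything: cancel deferred work, stop the PHY, the
               * queues and the timer, then disable interrupts and halt the
               * chip before preparing it for power-down.
               */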
18054         tg3_reset_task_cancel(tp);
18055         tg3_phy_stop(tp);
18056         tg3_netif_stop(tp);
18057
18058         tg3_timer_stop(tp);
18059
18060         tg3_full_lock(tp, 1);
18061         tg3_disable_ints(tp);
18062         tg3_full_unlock(tp);
18063
18064         netif_device_detach(dev);
18065
18066         tg3_full_lock(tp, 0);
18067         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18068         tg3_flag_clear(tp, INIT_COMPLETE);
18069         tg3_full_unlock(tp);
18070
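              /* If preparing for power-down fails, restart the hardware so
               * the device is left in a usable state rather than half dead.
               */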
18071         err = tg3_power_down_prepare(tp);
18072         if (err) {
18073                 int err2;
18074
18075                 tg3_full_lock(tp, 0);
18076
18077                 tg3_flag_set(tp, INIT_COMPLETE);
18078                 err2 = tg3_restart_hw(tp, true);
18079                 if (err2)
18080                         goto out;
18081
18082                 tg3_timer_start(tp);
18083
18084                 netif_device_attach(dev);
18085                 tg3_netif_start(tp);
18086
18087 out:
18088                 tg3_full_unlock(tp);
18089
18090                 if (!err2)
18091                         tg3_phy_start(tp);
18092         }
18093
18094 unlock:
18095         rtnl_unlock();
18096         return err;
18097 }
18098
18099 static int tg3_resume(struct device *device)
18100 {
18101         struct pci_dev *pdev = to_pci_dev(device);
18102         struct net_device *dev = pci_get_drvdata(pdev);
18103         struct tg3 *tp = netdev_priv(dev);
18104         int err = 0;
18105
18106         rtnl_lock();
18107
18108         if (!netif_running(dev))
18109                 goto unlock;
18110
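              /* Mirror of suspend: reattach the device and bring the hardware
               * back up, resetting the PHY unless the link was deliberately
               * kept up across the power-down.
               */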
18111         netif_device_attach(dev);
18112
18113         tg3_full_lock(tp, 0);
18114
18115         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18116
18117         tg3_flag_set(tp, INIT_COMPLETE);
18118         err = tg3_restart_hw(tp,
18119                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18120         if (err)
18121                 goto out;
18122
18123         tg3_timer_start(tp);
18124
18125         tg3_netif_start(tp);
18126
18127 out:
18128         tg3_full_unlock(tp);
18129
18130         if (!err)
18131                 tg3_phy_start(tp);
18132
18133 unlock:
18134         rtnl_unlock();
18135         return err;
18136 }
18137 #endif /* CONFIG_PM_SLEEP */
18138
18139 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18140
18141 static void tg3_shutdown(struct pci_dev *pdev)
18142 {
18143         struct net_device *dev = pci_get_drvdata(pdev);
18144         struct tg3 *tp = netdev_priv(dev);
18145
18146         rtnl_lock();
18147         netif_device_detach(dev);
18148
18149         if (netif_running(dev))
18150                 dev_close(dev);
18151
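              /* Put the chip into its low-power state only when the system is
               * really powering off; across a reboot it stays powered.
               */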
18152         if (system_state == SYSTEM_POWER_OFF)
18153                 tg3_power_down(tp);
18154
18155         rtnl_unlock();
18156 }
18157
18158 /**
18159  * tg3_io_error_detected - called when a PCI error is detected
18160  * @pdev: Pointer to PCI device
18161  * @state: The current PCI connection state
18162  *
18163  * This function is called after a PCI bus error affecting
18164  * this device has been detected.
18165  */
18166 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18167                                               pci_channel_state_t state)
18168 {
18169         struct net_device *netdev = pci_get_drvdata(pdev);
18170         struct tg3 *tp = netdev_priv(netdev);
18171         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18172
18173         netdev_info(netdev, "PCI I/O error detected\n");
18174
18175         rtnl_lock();
18176
18177         /* This could be a second call, or the netdev may not exist yet */
18178         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18179                 goto done;
18180
18181         /* We don't need to recover from a permanent error */
18182         if (state == pci_channel_io_frozen)
18183                 tp->pcierr_recovery = true;
18184
18185         tg3_phy_stop(tp);
18186
18187         tg3_netif_stop(tp);
18188
18189         tg3_timer_stop(tp);
18190
18191         /* Make sure that the reset task doesn't run */
18192         tg3_reset_task_cancel(tp);
18193
18194         netif_device_detach(netdev);
18195
18196         /* Clean up software state, even if MMIO is blocked */
18197         tg3_full_lock(tp, 0);
18198         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18199         tg3_full_unlock(tp);
18200
18201 done:
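              /* A permanent failure means the device is gone: close it and
               * tell the PCI core to disconnect.  Otherwise disable the
               * device and ask for a slot reset (the default result above).
               */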
18202         if (state == pci_channel_io_perm_failure) {
18203                 if (netdev) {
18204                         tg3_napi_enable(tp);
18205                         dev_close(netdev);
18206                 }
18207                 err = PCI_ERS_RESULT_DISCONNECT;
18208         } else {
18209                 pci_disable_device(pdev);
18210         }
18211
18212         rtnl_unlock();
18213
18214         return err;
18215 }
18216
18217 /**
18218  * tg3_io_slot_reset - called after the PCI bus has been reset.
18219  * @pdev: Pointer to PCI device
18220  *
18221  * Restart the card from scratch, as if from a cold boot.
18222  * At this point, the card has experienced a hard reset,
18223  * followed by fixups by BIOS, and has its config space
18224  * set up identically to what it was at cold boot.
18225  */
18226 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18227 {
18228         struct net_device *netdev = pci_get_drvdata(pdev);
18229         struct tg3 *tp = netdev_priv(netdev);
18230         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18231         int err;
18232
18233         rtnl_lock();
18234
18235         if (pci_enable_device(pdev)) {
18236                 dev_err(&pdev->dev,
18237                         "Cannot re-enable PCI device after reset.\n");
18238                 goto done;
18239         }
18240
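              /* Re-enable bus mastering and restore the config space saved at
               * probe time, then save it again so a later reset can restore
               * it too.
               */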
18241         pci_set_master(pdev);
18242         pci_restore_state(pdev);
18243         pci_save_state(pdev);
18244
18245         if (!netdev || !netif_running(netdev)) {
18246                 rc = PCI_ERS_RESULT_RECOVERED;
18247                 goto done;
18248         }
18249
18250         err = tg3_power_up(tp);
18251         if (err)
18252                 goto done;
18253
18254         rc = PCI_ERS_RESULT_RECOVERED;
18255
18256 done:
18257         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18258                 tg3_napi_enable(tp);
18259                 dev_close(netdev);
18260         }
18261         rtnl_unlock();
18262
18263         return rc;
18264 }
18265
18266 /**
18267  * tg3_io_resume - called when traffic can start flowing again.
18268  * @pdev: Pointer to PCI device
18269  *
18270  * This callback is called when the error recovery driver tells
18271  * us that it's OK to resume normal operation.
18272  */
18273 static void tg3_io_resume(struct pci_dev *pdev)
18274 {
18275         struct net_device *netdev = pci_get_drvdata(pdev);
18276         struct tg3 *tp = netdev_priv(netdev);
18277         int err;
18278
18279         rtnl_lock();
18280
18281         if (!netdev || !netif_running(netdev))
18282                 goto done;
18283
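              /* Bring the hardware back up under the full lock; whether or
               * not this succeeds, clear pcierr_recovery below so future
               * errors are handled normally.
               */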
18284         tg3_full_lock(tp, 0);
18285         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18286         tg3_flag_set(tp, INIT_COMPLETE);
18287         err = tg3_restart_hw(tp, true);
18288         if (err) {
18289                 tg3_full_unlock(tp);
18290                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18291                 goto done;
18292         }
18293
18294         netif_device_attach(netdev);
18295
18296         tg3_timer_start(tp);
18297
18298         tg3_netif_start(tp);
18299
18300         tg3_full_unlock(tp);
18301
18302         tg3_phy_start(tp);
18303
18304 done:
18305         tp->pcierr_recovery = false;
18306         rtnl_unlock();
18307 }
18308
18309 static const struct pci_error_handlers tg3_err_handler = {
18310         .error_detected = tg3_io_error_detected,
18311         .slot_reset     = tg3_io_slot_reset,
18312         .resume         = tg3_io_resume
18313 };
18314
18315 static struct pci_driver tg3_driver = {
18316         .name           = DRV_MODULE_NAME,
18317         .id_table       = tg3_pci_tbl,
18318         .probe          = tg3_init_one,
18319         .remove         = tg3_remove_one,
18320         .err_handler    = &tg3_err_handler,
18321         .driver.pm      = &tg3_pm_ops,
18322         .shutdown       = tg3_shutdown,
18323 };
18324
18325 module_pci_driver(tg3_driver);