GNU Linux-libre 4.14.294-gnu1
[releases.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2014 Broadcom Corporation.
8  *
9 /*(DEBLOBBED)*/
10
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/stringify.h>
15 #include <linux/kernel.h>
16 #include <linux/sched/signal.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/in.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mdio.h>
30 #include <linux/mii.h>
31 #include <linux/phy.h>
32 #include <linux/brcmphy.h>
33 #include <linux/if.h>
34 #include <linux/if_vlan.h>
35 #include <linux/ip.h>
36 #include <linux/tcp.h>
37 #include <linux/workqueue.h>
38 #include <linux/prefetch.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/firmware.h>
41 #include <linux/ssb/ssb_driver_gige.h>
42 #include <linux/hwmon.h>
43 #include <linux/hwmon-sysfs.h>
44
45 #include <net/checksum.h>
46 #include <net/ip.h>
47
48 #include <linux/io.h>
49 #include <asm/byteorder.h>
50 #include <linux/uaccess.h>
51
52 #include <uapi/linux/net_tstamp.h>
53 #include <linux/ptp_clock_kernel.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Return nonzero if TG3_FLAGS bit @flag is set in the bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
71
/* Atomically set TG3_FLAGS bit @flag in the bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
76
/* Atomically clear TG3_FLAGS bit @flag in the bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
81
/* Convenience wrappers: tg3_flag(tp, FOO) tests TG3_FLAG_FOO in
 * tp->tg3_flags; the _set/_clear variants modify it.  The token-pasting
 * keeps flag names type-checked against enum TG3_FLAGS.
 */
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     137
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "May 11, 2014"
95
96 #define RESET_KIND_SHUTDOWN     0
97 #define RESET_KIND_INIT         1
98 #define RESET_KIND_SUSPEND      2
99
100 #define TG3_DEF_RX_MODE         0
101 #define TG3_DEF_TX_MODE         0
102 #define TG3_DEF_MSG_ENABLE        \
103         (NETIF_MSG_DRV          | \
104          NETIF_MSG_PROBE        | \
105          NETIF_MSG_LINK         | \
106          NETIF_MSG_TIMER        | \
107          NETIF_MSG_IFDOWN       | \
108          NETIF_MSG_IFUP         | \
109          NETIF_MSG_RX_ERR       | \
110          NETIF_MSG_TX_ERR)
111
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
113
114 /* length of time before we decide the hardware is borked,
115  * and dev->tx_timeout() should be called to fix the problem
116  */
117
118 #define TG3_TX_TIMEOUT                  (5 * HZ)
119
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU                     ETH_ZLEN
122 #define TG3_MAX_MTU(tp) \
123         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
124
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126  * You can't change the ring sizes, but you can change where you place
127  * them in the NIC onboard memory.
128  */
129 #define TG3_RX_STD_RING_SIZE(tp) \
130         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING         200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
137
138 /* Do not place this n-ring entries value into the tp struct itself,
139  * we really want to expose these constants to GCC so that modulo et
140  * al.  operations are done with shifts and masks instead of with
141  * hw multiply/modulo instructions.  Another solution would be to
142  * replace things like '% foo' with '& (foo - 1)'.
143  */
144
145 #define TG3_TX_RING_SIZE                512
146 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
147
148 #define TG3_RX_STD_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
155                                  TG3_TX_RING_SIZE)
156 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
157
158 #define TG3_DMA_BYTE_ENAB               64
159
160 #define TG3_RX_STD_DMA_SZ               1536
161 #define TG3_RX_JMB_DMA_SZ               9046
162
163 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
164
165 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
167
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
170
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
173
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175  * that are at least dword aligned when used in PCIX mode.  The driver
176  * works around this bug by double copying the packet.  This workaround
177  * is built into the normal double copy length check for efficiency.
178  *
179  * However, the double copy is only necessary on those architectures
180  * where unaligned memory accesses are inefficient.  For those architectures
181  * where unaligned memory accesses incur little penalty, we can reintegrate
182  * the 5701 in the normal rx path.  Doing so saves a device structure
183  * dereference by hardcoding the double copy threshold in place.
184  */
185 #define TG3_RX_COPY_THRESHOLD           256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
188 #else
189         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
190 #endif
191
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
194 #else
195 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
196 #endif
197
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K            2048
201 #define TG3_TX_BD_DMA_MAX_4K            4096
202
203 #define TG3_RAW_IP_ALIGN 2
204
205 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
206 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
207
208 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
209 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
210
211 #define FIRMWARE_TG3            "/*(DEBLOBBED)*/"
212 #define FIRMWARE_TG357766       "/*(DEBLOBBED)*/"
213 #define FIRMWARE_TG3TSO         "/*(DEBLOBBED)*/"
214 #define FIRMWARE_TG3TSO5        "/*(DEBLOBBED)*/"
215
/* Driver version string: module name + version + release date. */
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*(DEBLOBBED)*/

/* Module parameter: bitmap of NETIF_MSG_* message categories. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228
229 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
230 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
231
/* PCI device IDs claimed by this driver.  .driver_data carries
 * TG3_DRV_DATA_FLAG_* quirk bits (e.g. 10/100-only parts).
 */
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Names reported by "ethtool -S".  NOTE(review): the order must match
 * the order the statistics are gathered in elsewhere in the driver -
 * do not reorder or insert entries in the middle.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Self-test names for "ethtool -t", indexed by the TG3_*_TEST defines
 * above (designated initializers keep names and indices in sync).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Plain MMIO register write (posted; no read-back flush). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
467
/* Plain MMIO register read. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
472
/* MMIO write into the APE register space (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
477
/* MMIO read from the APE register space (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
482
/* Write a chip register indirectly through the PCI config-space window
 * (REG_BASE_ADDR selects the register, REG_DATA carries the value).
 * indirect_lock serializes use of the shared window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write out to the chip.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
498
/* Read a chip register indirectly through the PCI config-space window.
 * Counterpart of tg3_write_indirect_reg32(); serialized by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
510
/* Write a mailbox register when register access is indirect (PCI config
 * space).  Two mailboxes are written through their own dedicated config
 * registers; everything else goes through the shared indirect window at
 * offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
/* Read a mailbox register through the indirect config-space window
 * (mailboxes live at offset +0x5600 in that window).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Write a mailbox register and read it back when a flush is needed:
 * either FLUSH_POSTED_WRITES is set, or neither the mailbox
 * write-reorder workaround nor the ICH workaround is in effect.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox via MMIO.  Chips flagged TXD_MBOX_HWBUG get the
 * value written twice (hardware bug workaround); chips flagged
 * MBOX_WRITE_REORDER or FLUSH_POSTED_WRITES get a read-back to flush
 * the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
/* 5906 mailbox read: mailboxes are offset by GRCMBOX_BASE on this chip. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906 mailbox write: mailboxes are offset by GRCMBOX_BASE on this chip. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
/* Shorthand register accessors.  All of these expand against a local
 * variable named 'tp'; the *_f / *_flush / *_wait_f variants force the
 * posted write out to the chip (with an optional usec delay).
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
617
/* Write @val into NIC SRAM at offset @off.
 *
 * On the 5906, offsets in [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are silently skipped (mirrors the check in tg3_read_mem()).  SRAM is
 * reached through a movable memory window, via PCI config space when
 * SRAM_USE_CONFIG is set, else via the MMIO window registers.  The
 * window base is parked at zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read the NIC SRAM word at @off into *@val.
 *
 * On the 5906, offsets in [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are not accessed; *val is set to 0 instead.  Otherwise, access goes
 * through the memory window (PCI config space when SRAM_USE_CONFIG is
 * set, else MMIO); the window base is parked at zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire APE hardware lock @locknum for the driver.
 *
 * Returns 0 on success (or when the APE is not enabled), -EINVAL for an
 * unknown lock number, and -EBUSY if the grant is not observed within
 * roughly one millisecond (100 polls, 10 us apart).
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The GPIO lock is skipped entirely on the 5761. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Non-PHY locks: PCI function 0 uses the driver bit,
		 * other functions use their own per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* Request/grant register bases differ on the 5761. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		/* Give up early if the device has dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
761
/* Release the APE semaphore @locknum previously taken with
 * tg3_ape_lock().  Writing our bit to the grant register gives the
 * lock back; unknown lock numbers are silently ignored.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock, so there is nothing to release. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must mirror the bit selection used in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
797
/* Take APE_LOCK_MEM and wait (up to @timeout_us microseconds) for any
 * previously posted driver event to be consumed by the APE.
 *
 * On success (return 0) APE_LOCK_MEM is left HELD by the caller; on
 * -EBUSY (lock unavailable or event still pending at timeout) the lock
 * is not held.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Previous event still pending - drop the lock and retry. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
818
819 #ifdef CONFIG_TIGON3_HWMON
820 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
821 {
822         u32 i, apedata;
823
824         for (i = 0; i < timeout_us / 10; i++) {
825                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
826
827                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
828                         break;
829
830                 udelay(10);
831         }
832
833         return i == timeout_us / 10;
834 }
835
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, transferring through the APE shared-memory message buffer one
 * buffer-sized chunk at a time.  For each chunk: take the event lock,
 * post a SCRTCHPD_READ request, ring the doorbell and wait for the APE
 * to fill the message area, then copy the words out.
 *
 * Returns 0 on success, -ENODEV if the APE segment signature is bad,
 * -EAGAIN if firmware is not ready or a chunk times out, or the error
 * from tg3_ape_event_lock().  Only meaningful on NCSI-capable parts.
 * NOTE(review): the copy-out loop assumes each chunk length is a
 * multiple of 4 bytes - presumably guaranteed by the buffer-length
 * value the APE reports; confirm.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer: two control words followed
	 * by the data area.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* Tell the APE where to read from and how much. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock held by tg3_ape_event_lock() and
		 * ring the doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the message area word by word. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
899 #endif
900
/* Post @event to the APE firmware: verify the segment signature and
 * firmware-ready status, wait for any previous event to be serviced,
 * then write the event (marked pending) and ring the doorbell.
 *
 * Returns 0 on success, -EAGAIN if the APE is absent/not ready, or the
 * error from tg3_ape_event_lock().  The MEM lock that
 * tg3_ape_event_lock() leaves held on success is released here after
 * the event status is written.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
927
/* Inform the APE firmware of a driver state transition.  @kind is
 * RESET_KIND_INIT (driver starting: populate the host segment in APE
 * shared memory) or RESET_KIND_SHUTDOWN (driver unloading/suspending:
 * wipe the host segment signature and record WoL intent).  Any other
 * kind is ignored.  Finishes by sending the matching state-change
 * event to the APE.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		/* Bump the init counter so firmware can see restarts. */
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
981
982 static void tg3_disable_ints(struct tg3 *tp)
983 {
984         int i;
985
986         tw32(TG3PCI_MISC_HOST_CTRL,
987              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
988         for (i = 0; i < tp->irq_max; i++)
989                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 }
991
/* Re-enable chip interrupts after tg3_disable_ints(): clear the PCI
 * interrupt mask, rearm each active vector's mailbox with its last
 * processed tag, then kick the coalescing engine (or force an
 * interrupt if status is already pending in non-tagged mode).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order irq_sync clear before unmasking the chip */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): in 1-shot MSI mode the mailbox is written a
		 * second time - presumably needed to rearm the one-shot
		 * interrupt; confirm against chip documentation.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1022
1023 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1024 {
1025         struct tg3 *tp = tnapi->tp;
1026         struct tg3_hw_status *sblk = tnapi->hw_status;
1027         unsigned int work_exists = 0;
1028
1029         /* check for phy events */
1030         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1031                 if (sblk->status & SD_STATUS_LINK_CHG)
1032                         work_exists = 1;
1033         }
1034
1035         /* check for TX work to do */
1036         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037                 work_exists = 1;
1038
1039         /* check for RX work to do */
1040         if (tnapi->rx_rcb_prod_idx &&
1041             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1042                 work_exists = 1;
1043
1044         return work_exists;
1045 }
1046
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Rearm this vector with the last processed tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1067
/* Switch the core clock back to its normal source by stepping
 * TG3PCI_CLOCK_CTRL through the intermediate values the chip family
 * requires.  No-op on CPMU-equipped and 5780-class devices, which
 * manage clocks differently.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Preserve only the CLKRUN bits and the low speed field. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via ALTCLK in two writes before the final
		 * value is applied below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1100
1101 #define PHY_BUSY_LOOPS  5000
1102
/* Read PHY register @reg at MDIO address @phy_addr through the MAC's
 * MI interface, storing the result in *val (zeroed on failure paths
 * that time out).  MI auto-polling is suspended for the duration of
 * the transaction and the PHY APE lock serializes against firmware
 * access.  Returns 0 on success or -EBUSY if the MI interface stayed
 * busy for PHY_BUSY_LOOPS polls.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Suspend auto-polling so it cannot collide with our manual
	 * MI transaction; restored before returning.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Compose the MI command frame: PHY address, register, read. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion; re-read once more after BUSY clears to
	 * pick up the data field.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1156
/* Read PHY register @reg from the device's configured PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1161
/* Write @val to PHY register @reg at MDIO address @phy_addr through
 * the MAC's MI interface.  Writes to MII_CTRL1000 / MII_TG3_AUX_CTRL
 * are silently skipped on FET PHYs, which do not have those registers.
 * MI auto-polling is suspended for the duration and the PHY APE lock
 * serializes against firmware.  Returns 0 or -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Suspend auto-polling while the manual transaction runs. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Compose the MI command frame: address, register, data, write. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for the BUSY bit to clear. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1215
/* Write @val to PHY register @reg at the device's configured PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1220
1221 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1222 {
1223         int err;
1224
1225         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1226         if (err)
1227                 goto done;
1228
1229         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1230         if (err)
1231                 goto done;
1232
1233         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1234                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1235         if (err)
1236                 goto done;
1237
1238         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1239
1240 done:
1241         return err;
1242 }
1243
1244 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1245 {
1246         int err;
1247
1248         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1249         if (err)
1250                 goto done;
1251
1252         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1253         if (err)
1254                 goto done;
1255
1256         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1257                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1258         if (err)
1259                 goto done;
1260
1261         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1262
1263 done:
1264         return err;
1265 }
1266
1267 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1268 {
1269         int err;
1270
1271         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1272         if (!err)
1273                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1274
1275         return err;
1276 }
1277
1278 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1279 {
1280         int err;
1281
1282         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1283         if (!err)
1284                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1285
1286         return err;
1287 }
1288
1289 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1290 {
1291         int err;
1292
1293         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1294                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1295                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1296         if (!err)
1297                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1298
1299         return err;
1300 }
1301
1302 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1303 {
1304         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1305                 set |= MII_TG3_AUXCTL_MISC_WREN;
1306
1307         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 }
1309
1310 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1311 {
1312         u32 val;
1313         int err;
1314
1315         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1316
1317         if (err)
1318                 return err;
1319
1320         if (enable)
1321                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322         else
1323                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1324
1325         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1326                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1327
1328         return err;
1329 }
1330
1331 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1332 {
1333         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1334                             reg | val | MII_TG3_MISC_SHDW_WREN);
1335 }
1336
1337 static int tg3_bmcr_reset(struct tg3 *tp)
1338 {
1339         u32 phy_control;
1340         int limit, err;
1341
1342         /* OK, reset it, and poll the BMCR_RESET bit until it
1343          * clears or we time out.
1344          */
1345         phy_control = BMCR_RESET;
1346         err = tg3_writephy(tp, MII_BMCR, phy_control);
1347         if (err != 0)
1348                 return -EBUSY;
1349
1350         limit = 5000;
1351         while (limit--) {
1352                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1353                 if (err != 0)
1354                         return -EBUSY;
1355
1356                 if ((phy_control & BMCR_RESET) == 0) {
1357                         udelay(40);
1358                         break;
1359                 }
1360                 udelay(10);
1361         }
1362         if (limit < 0)
1363                 return -EBUSY;
1364
1365         return 0;
1366 }
1367
1368 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1369 {
1370         struct tg3 *tp = bp->priv;
1371         u32 val;
1372
1373         spin_lock_bh(&tp->lock);
1374
1375         if (__tg3_readphy(tp, mii_id, reg, &val))
1376                 val = -EIO;
1377
1378         spin_unlock_bh(&tp->lock);
1379
1380         return val;
1381 }
1382
1383 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1384 {
1385         struct tg3 *tp = bp->priv;
1386         u32 ret = 0;
1387
1388         spin_lock_bh(&tp->lock);
1389
1390         if (__tg3_writephy(tp, mii_id, reg, val))
1391                 ret = -EIO;
1392
1393         spin_unlock_bh(&tp->lock);
1394
1395         return ret;
1396 }
1397
/* Program the 5785 MAC's PHY interface registers (MAC_PHYCFG1/2 and
 * the external RGMII mode register) to match the attached PHY type and
 * the RGMII in-band/out-of-band signalling flags.  Unrecognized PHYs
 * are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	/* Select the LED mode configuration for the attached PHY. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces only need LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the external in-band selections into the RGMII mode
	 * register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1478
/* Clear the MI auto-poll bit and write the mode to the MAC, then
 * reapply the 5785 MAC<->PHY interface configuration if the MDIO bus
 * has already been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1489
/* Initialize MDIO access: determine the PHY address for this device
 * (5717+ parts derive it from the PCI function, with a +7 offset for
 * serdes; SSB/roboswitch cores ask the SSB layer; everything else uses
 * the default MII address), then, when phylib is in use, allocate and
 * register an mdio_bus and apply per-PHY interface/dev_flags settings.
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	/* Only scan our own PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply PHY-model-specific interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1596
1597 static void tg3_mdio_fini(struct tg3 *tp)
1598 {
1599         if (tg3_flag(tp, MDIOBUS_INITED)) {
1600                 tg3_flag_clear(tp, MDIOBUS_INITED);
1601                 mdiobus_unregister(tp->mdio_bus);
1602                 mdiobus_free(tp->mdio_bus);
1603         }
1604 }
1605
1606 /* tp->lock is held. */
1607 static inline void tg3_generate_fw_event(struct tg3 *tp)
1608 {
1609         u32 val;
1610
1611         val = tr32(GRC_RX_CPU_EVENT);
1612         val |= GRC_RX_CPU_DRIVER_EVENT;
1613         tw32_f(GRC_RX_CPU_EVENT, val);
1614
1615         tp->last_event_jiffies = jiffies;
1616 }
1617
1618 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1619
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * acknowledge - i.e. clear - the previous driver event.  The wait is
 * shortened or skipped based on how much time has already elapsed
 * since tg3_generate_fw_event() last ran.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;	/* poll in ~8 usec steps */

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		/* Give up early if the device dropped off the bus. */
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1649
1650 /* tp->lock is held. */
1651 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1652 {
1653         u32 reg, val;
1654
1655         val = 0;
1656         if (!tg3_readphy(tp, MII_BMCR, &reg))
1657                 val = reg << 16;
1658         if (!tg3_readphy(tp, MII_BMSR, &reg))
1659                 val |= (reg & 0xffff);
1660         *data++ = val;
1661
1662         val = 0;
1663         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1664                 val = reg << 16;
1665         if (!tg3_readphy(tp, MII_LPA, &reg))
1666                 val |= (reg & 0xffff);
1667         *data++ = val;
1668
1669         val = 0;
1670         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1671                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1672                         val = reg << 16;
1673                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1674                         val |= (reg & 0xffff);
1675         }
1676         *data++ = val;
1677
1678         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1679                 val = reg << 16;
1680         else
1681                 val = 0;
1682         *data++ = val;
1683 }
1684
1685 /* tp->lock is held. */
1686 static void tg3_ump_link_report(struct tg3 *tp)
1687 {
1688         u32 data[4];
1689
1690         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1691                 return;
1692
1693         tg3_phy_gather_ump_data(tp, data);
1694
1695         tg3_wait_for_event_ack(tp);
1696
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1698         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1699         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1700         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1701         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1702         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1703
1704         tg3_generate_fw_event(tp);
1705 }
1706
1707 /* tp->lock is held. */
1708 static void tg3_stop_fw(struct tg3 *tp)
1709 {
1710         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1711                 /* Wait for RX cpu to ACK the previous event. */
1712                 tg3_wait_for_event_ack(tp);
1713
1714                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1715
1716                 tg3_generate_fw_event(tp);
1717
1718                 /* Wait for RX cpu to ACK this event. */
1719                 tg3_wait_for_event_ack(tp);
1720         }
1721 }
1722
1723 /* tp->lock is held. */
1724 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1725 {
1726         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1727                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1728
1729         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1730                 switch (kind) {
1731                 case RESET_KIND_INIT:
1732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733                                       DRV_STATE_START);
1734                         break;
1735
1736                 case RESET_KIND_SHUTDOWN:
1737                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738                                       DRV_STATE_UNLOAD);
1739                         break;
1740
1741                 case RESET_KIND_SUSPEND:
1742                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743                                       DRV_STATE_SUSPEND);
1744                         break;
1745
1746                 default:
1747                         break;
1748                 }
1749         }
1750 }
1751
1752 /* tp->lock is held. */
1753 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1754 {
1755         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1756                 switch (kind) {
1757                 case RESET_KIND_INIT:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_START_DONE);
1760                         break;
1761
1762                 case RESET_KIND_SHUTDOWN:
1763                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1764                                       DRV_STATE_UNLOAD_DONE);
1765                         break;
1766
1767                 default:
1768                         break;
1769                 }
1770         }
1771 }
1772
1773 /* tp->lock is held. */
1774 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 {
1776         if (tg3_flag(tp, ENABLE_ASF)) {
1777                 switch (kind) {
1778                 case RESET_KIND_INIT:
1779                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780                                       DRV_STATE_START);
1781                         break;
1782
1783                 case RESET_KIND_SHUTDOWN:
1784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785                                       DRV_STATE_UNLOAD);
1786                         break;
1787
1788                 case RESET_KIND_SUSPEND:
1789                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790                                       DRV_STATE_SUSPEND);
1791                         break;
1792
1793                 default:
1794                         break;
1795                 }
1796         }
1797 }
1798
/* Poll the firmware handshake mailbox until the boot code signals that
 * it has finished initializing, or time out.  Returns 0 on success (or
 * when no firmware is expected) and -ENODEV on failure.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* A previous poll already determined no firmware is present. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware writes back the one's complement of the magic
		 * value once its boot code has run.
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			/* Device went away mid-poll; report once and
			 * stop waiting.
			 */
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1862
1863 static void tg3_link_report(struct tg3 *tp)
1864 {
1865         if (!netif_carrier_ok(tp->dev)) {
1866                 netif_info(tp, link, tp->dev, "Link is down\n");
1867                 tg3_ump_link_report(tp);
1868         } else if (netif_msg_link(tp)) {
1869                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1870                             (tp->link_config.active_speed == SPEED_1000 ?
1871                              1000 :
1872                              (tp->link_config.active_speed == SPEED_100 ?
1873                               100 : 10)),
1874                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1875                              "full" : "half"));
1876
1877                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1878                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1879                             "on" : "off",
1880                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1881                             "on" : "off");
1882
1883                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1884                         netdev_info(tp->dev, "EEE is %s\n",
1885                                     tp->setlpicnt ? "enabled" : "disabled");
1886
1887                 tg3_ump_link_report(tp);
1888         }
1889
1890         tp->link_up = netif_carrier_ok(tp->dev);
1891 }
1892
1893 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1894 {
1895         u32 flowctrl = 0;
1896
1897         if (adv & ADVERTISE_PAUSE_CAP) {
1898                 flowctrl |= FLOW_CTRL_RX;
1899                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1900                         flowctrl |= FLOW_CTRL_TX;
1901         } else if (adv & ADVERTISE_PAUSE_ASYM)
1902                 flowctrl |= FLOW_CTRL_TX;
1903
1904         return flowctrl;
1905 }
1906
1907 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1908 {
1909         u16 miireg;
1910
1911         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1912                 miireg = ADVERTISE_1000XPAUSE;
1913         else if (flow_ctrl & FLOW_CTRL_TX)
1914                 miireg = ADVERTISE_1000XPSE_ASYM;
1915         else if (flow_ctrl & FLOW_CTRL_RX)
1916                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1917         else
1918                 miireg = 0;
1919
1920         return miireg;
1921 }
1922
1923 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1924 {
1925         u32 flowctrl = 0;
1926
1927         if (adv & ADVERTISE_1000XPAUSE) {
1928                 flowctrl |= FLOW_CTRL_RX;
1929                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1930                         flowctrl |= FLOW_CTRL_TX;
1931         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1932                 flowctrl |= FLOW_CTRL_TX;
1933
1934         return flowctrl;
1935 }
1936
1937 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1938 {
1939         u8 cap = 0;
1940
1941         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1942                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1943         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1944                 if (lcladv & ADVERTISE_1000XPAUSE)
1945                         cap = FLOW_CTRL_RX;
1946                 if (rmtadv & ADVERTISE_1000XPAUSE)
1947                         cap = FLOW_CTRL_TX;
1948         }
1949
1950         return cap;
1951 }
1952
/* Program the MAC RX/TX flow-control enables to match the negotiated
 * (or forced) pause configuration.  @lcladv/@rmtadv are the local and
 * link-partner pause advertisements; tp->lock is held by callers.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Resolve pause from the advertisements; serdes links
		 * use the 1000BASE-X bit layout.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the MAC registers when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1991
/* phylib adjust_link callback: mirror the PHY's current link state
 * (speed/duplex/pause) into the MAC mode registers and report link
 * changes.  Called without tp->lock held; takes it internally.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select the MAC port mode for the new speed. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause
			 * advertisements so flow control can resolve.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit needs different TX slot-time values. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report() logs and may send
	 * a firmware event.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
2075
/* Attach the net device to its PHY via phylib and mask the PHY's
 * supported features down to what the MAC can do.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	/* Already attached. */
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported PHY interface type: undo the attach. */
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	phy_attached_info(phydev);

	return 0;
}
2125
2126 static void tg3_phy_start(struct tg3 *tp)
2127 {
2128         struct phy_device *phydev;
2129
2130         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2131                 return;
2132
2133         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2134
2135         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2136                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2137                 phydev->speed = tp->link_config.speed;
2138                 phydev->duplex = tp->link_config.duplex;
2139                 phydev->autoneg = tp->link_config.autoneg;
2140                 phydev->advertising = tp->link_config.advertising;
2141         }
2142
2143         phy_start(phydev);
2144
2145         phy_start_aneg(phydev);
2146 }
2147
2148 static void tg3_phy_stop(struct tg3 *tp)
2149 {
2150         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2151                 return;
2152
2153         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2154 }
2155
2156 static void tg3_phy_fini(struct tg3 *tp)
2157 {
2158         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2159                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2160                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2161         }
2162 }
2163
2164 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2165 {
2166         int err;
2167         u32 val;
2168
2169         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2170                 return 0;
2171
2172         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173                 /* Cannot do read-modify-write on 5401 */
2174                 err = tg3_phy_auxctl_write(tp,
2175                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2176                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2177                                            0x4c20);
2178                 goto done;
2179         }
2180
2181         err = tg3_phy_auxctl_read(tp,
2182                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2183         if (err)
2184                 return err;
2185
2186         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2187         err = tg3_phy_auxctl_write(tp,
2188                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2189
2190 done:
2191         return err;
2192 }
2193
2194 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2195 {
2196         u32 phytest;
2197
2198         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2199                 u32 phy;
2200
2201                 tg3_writephy(tp, MII_TG3_FET_TEST,
2202                              phytest | MII_TG3_FET_SHADOW_EN);
2203                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2204                         if (enable)
2205                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2206                         else
2207                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2208                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2209                 }
2210                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2211         }
2212 }
2213
/* Enable or disable PHY auto power-down (APD).  No-op on chips or PHY
 * configurations that do not support it.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	/* FET PHYs use a different shadow-register scheme. */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	/* 84ms wake timer, plus the enable bit when turning APD on. */
	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2244
/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs. */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* FET PHYs: the MDIX bit lives behind the shadow window
		 * opened via the test register.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the original test register value. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Other PHYs: read-modify-write the misc auxctl shadow. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2285
2286 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2287 {
2288         int ret;
2289         u32 val;
2290
2291         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2292                 return;
2293
2294         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2295         if (!ret)
2296                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2297                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2298 }
2299
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) fuse values cached in tp->phy_otp.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes require the SMDSP auxctl window to be open. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP window again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2336
/* Read the current EEE state from PHY/MAC registers into @eee, or into
 * tp->eee when @eee is NULL.  Bails out early (leaving the remaining
 * fields untouched) if any PHY clause-45 read fails.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2376
/* Re-evaluate EEE (Energy Efficient Ethernet) after a link change:
 * arm the LPI entry countdown when the new link supports EEE, or turn
 * LPI off when it does not.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE applies only to autonegotiated full-duplex 100/1000. */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear the DSP TAP26 setting and
		 * disable LPI in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2416
/* Turn on EEE low-power-idle in the CPMU, including the PHY DSP TAP26
 * bits required on 5717/5719/57765-class chips at gigabit speed.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2435
2436 static int tg3_wait_macro_done(struct tg3 *tp)
2437 {
2438         int limit = 100;
2439
2440         while (limit--) {
2441                 u32 tmp32;
2442
2443                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2444                         if ((tmp32 & 0x1000) == 0)
2445                                 break;
2446                 }
2447         }
2448         if (limit < 0)
2449                 return -EBUSY;
2450
2451         return 0;
2452 }
2453
/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify the PHY's internal memory.  Returns 0 on
 * success, -EBUSY on mismatch or macro timeout; *resetp is set to 1
 * when the caller should reset the PHY and retry.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's test block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch the same channel into read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against the
		 * pattern, masking the bits the hardware doesn't store.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2519
/* Zero-fill all four DSP TAP channels, clearing the test pattern that
 * tg3_phy_write_and_check_testpat() loaded.
 *
 * Returns 0 on success, -EBUSY if the DSP macro does not complete.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2539
2540 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2541 {
2542         u32 reg32, phy9_orig;
2543         int retries, do_phy_reset, err;
2544
2545         retries = 10;
2546         do_phy_reset = 1;
2547         do {
2548                 if (do_phy_reset) {
2549                         err = tg3_bmcr_reset(tp);
2550                         if (err)
2551                                 return err;
2552                         do_phy_reset = 0;
2553                 }
2554
2555                 /* Disable transmitter and interrupt.  */
2556                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2557                         continue;
2558
2559                 reg32 |= 0x3000;
2560                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2561
2562                 /* Set full-duplex, 1000 mbps.  */
2563                 tg3_writephy(tp, MII_BMCR,
2564                              BMCR_FULLDPLX | BMCR_SPEED1000);
2565
2566                 /* Set to master mode.  */
2567                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2568                         continue;
2569
2570                 tg3_writephy(tp, MII_CTRL1000,
2571                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2572
2573                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2574                 if (err)
2575                         return err;
2576
2577                 /* Block the PHY control access.  */
2578                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2579
2580                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2581                 if (!err)
2582                         break;
2583         } while (--retries);
2584
2585         err = tg3_phy_reset_chanpat(tp);
2586         if (err)
2587                 return err;
2588
2589         tg3_phydsp_write(tp, 0x8005, 0x0000);
2590
2591         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2592         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2593
2594         tg3_phy_toggle_auxctl_smdsp(tp, false);
2595
2596         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2597
2598         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2599         if (err)
2600                 return err;
2601
2602         reg32 &= ~0x3000;
2603         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2604
2605         return 0;
2606 }
2607
/* Drop the software link-up state and tell the stack the carrier is gone. */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2613
/* Warn (when ASF management firmware is enabled) that side-band
 * management traffic will be interrupted by the upcoming PHY change.
 */
static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
2620
/* Reset the tigon3 PHY and reapply all chip-specific workarounds
 * afterwards (DSP fixups, CPMU clock tweaks, jumbo-frame settings,
 * output voltage, MDI-X and wirespeed).  NOTE(review): the old comment
 * mentioned a FORCE argument that does not exist; the PHY is always
 * reset when this is called.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Take the EPHY out of IDDQ before touching it */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads: confirms the PHY responds (BMSR latches state,
	 * so the second read reflects current status).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report link loss to the stack before resetting */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear GPHY 10MB-RX-only mode around reset */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock setting used while powered down */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP fixups, applied under SMDSP access */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* The double write appears deliberate in this workaround */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2764
/* GPIO power-source handshake messages.  Each PCI function publishes a
 * 2-bit message in its own 4-bit nibble of a shared status word (see
 * tg3_set_function_status()), hence the <<0/<<4/<<8/<<12 replication in
 * the ALL_* masks below.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2780
/* Publish this PCI function's GPIO power-state message in the status
 * word shared by all functions.  On 5717/5719 the word lives in APE
 * scratch (TG3_APE_GPIO_MSG); on other chips in TG3_CPMU_DRV_STATUS.
 *
 * Returns the full updated status word, shifted down so function 0's
 * nibble starts at bit 0 (4 bits per function).
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Replace only this function's nibble */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2803
/* Switch the board's power source to Vmain via GRC_LOCAL_CTRL.  On
 * 5717/5719/5720 the change is serialized with the APE GPIO lock and
 * driver presence is advertised in the shared status word first.
 *
 * Returns 0 on success (or non-NIC no-op), -EIO if the APE GPIO lock
 * cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2828
/* GPIO1 toggle sequence used when the device is to stay on Vmain
 * power.  Skipped on non-NIC boards and on 5700/5701.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	/* Drive GPIO1 high, low, then high again, with a settle delay
	 * after each step.
	 */
	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2852
/* Switch the board's power source to auxiliary power (Vaux) by driving
 * the power-switch GPIOs.  The exact GPIO recipe depends on the chip
 * generation and board strapping — see the inline comments.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* 5700/5701: single write drives GPIO0/1 high */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 as a separate, delayed step */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2929
/* 5717-class aux-power negotiation: publish this function's VAUX need
 * (ASF/APE firmware or WoL) in the shared status word, then switch the
 * power source only if no other function's driver is present.  The
 * whole exchange is serialized by the APE GPIO lock.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is present; leave the GPIOs to it */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2954
/* Decide whether this device — and, on dual-port boards, its peer
 * function — still needs auxiliary power (for WoL and/or ASF
 * firmware), and switch the power source accordingly.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		/* These chips negotiate through the shared status word */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer is fully initialized; leave power source as-is */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2998
2999 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3000 {
3001         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3002                 return 1;
3003         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3004                 if (speed != SPEED_10)
3005                         return 1;
3006         } else if (speed == SPEED_10)
3007                 return 1;
3008
3009         return 0;
3010 }
3011
3012 static bool tg3_phy_power_bug(struct tg3 *tp)
3013 {
3014         switch (tg3_asic_rev(tp)) {
3015         case ASIC_REV_5700:
3016         case ASIC_REV_5704:
3017                 return true;
3018         case ASIC_REV_5780:
3019                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3020                         return true;
3021                 return false;
3022         case ASIC_REV_5717:
3023                 if (!tp->pci_fn)
3024                         return true;
3025                 return false;
3026         case ASIC_REV_5719:
3027         case ASIC_REV_5720:
3028                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3029                     !tp->pci_fn)
3030                         return true;
3031                 return false;
3032         }
3033
3034         return false;
3035 }
3036
3037 static bool tg3_phy_led_bug(struct tg3 *tp)
3038 {
3039         switch (tg3_asic_rev(tp)) {
3040         case ASIC_REV_5719:
3041         case ASIC_REV_5720:
3042                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3043                     !tp->pci_fn)
3044                         return true;
3045                 return false;
3046         }
3047
3048         return false;
3049 }
3050
/* Power down (or prepare for low power) the PHY ahead of suspend or
 * power-off.  Serdes, 5906 EPHY, FET-style and standard copper PHYs
 * each need a different sequence, and some chips must skip the final
 * BMCR power-down entirely (see tg3_phy_power_bug()).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Reset the PHY, then put the EPHY into IDDQ mode */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open the shadow-register window, set the
			 * AUXMODE4 SBPD bit, then close the window.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Switch the 1000MB MAC clock down to 12.5MHz */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3124
/* tp->lock is held.
 * Acquire the NVRAM software arbitration grant (SWARB) for the host.
 * Nestable: only the outermost acquisition touches the hardware;
 * nesting depth is tracked in nvram_lock_cnt.
 *
 * Returns 0 on success, -ENODEV if the grant is not given within
 * 8000 * 20us of polling.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Timed out: withdraw the request */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
3147
/* tp->lock is held.
 * Release one reference on the NVRAM arbitration; the hardware grant
 * (SWARB) is dropped only when the outermost holder unlocks.
 */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
3158
/* tp->lock is held.
 * Enable host NVRAM access on 5750+ parts; a no-op when the
 * PROTECTED_NVRAM flag is set.
 */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
3168
/* tp->lock is held.
 * Disable host NVRAM access on 5750+ parts; a no-op when the
 * PROTECTED_NVRAM flag is set.
 */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
3178
/* Read one 32-bit word from the legacy SEEPROM interface (used when
 * the NVRAM flag is not set).  @offset must be dword-aligned and fit
 * in EEPROM_ADDR_ADDR_MASK.
 *
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY if the read
 * does not complete within ~1s of polling.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve non-address bits, then kick off a read at @offset */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3218
/* Max completion polls (10-40us each) for one NVRAM command */
#define NVRAM_CMD_TIMEOUT 5000

/* Issue @nvram_cmd to the NVRAM controller and poll for
 * NVRAM_CMD_DONE.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3239
3240 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3241 {
3242         if (tg3_flag(tp, NVRAM) &&
3243             tg3_flag(tp, NVRAM_BUFFERED) &&
3244             tg3_flag(tp, FLASH) &&
3245             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3246             (tp->nvram_jedecnum == JEDEC_ATMEL))
3247
3248                 addr = ((addr / tp->nvram_pagesize) <<
3249                         ATMEL_AT45DB0X1B_PAGE_POS) +
3250                        (addr % tp->nvram_pagesize);
3251
3252         return addr;
3253 }
3254
3255 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3256 {
3257         if (tg3_flag(tp, NVRAM) &&
3258             tg3_flag(tp, NVRAM_BUFFERED) &&
3259             tg3_flag(tp, FLASH) &&
3260             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3261             (tp->nvram_jedecnum == JEDEC_ATMEL))
3262
3263                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3264                         tp->nvram_pagesize) +
3265                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3266
3267         return addr;
3268 }
3269
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Reads one 32-bit word at @offset, falling back to the SEEPROM
 * interface when the NVRAM flag is unset.  Returns 0 on success or a
 * negative errno (-EINVAL bad offset; -ENODEV/-EBUSY from the lock
 * and command helpers).
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3307
3308 /* Ensures NVRAM data is in bytestream format. */
3309 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3310 {
3311         u32 v;
3312         int res = tg3_nvram_read(tp, offset, &v);
3313         if (!res)
3314                 *val = cpu_to_be32(v);
3315         return res;
3316 }
3317
/* Write @len bytes from @buf to the legacy SEEPROM interface, one
 * 32-bit word at a time (offset/len are assumed dword aligned), polling
 * up to ~1s per word for completion.
 *
 * Returns 0 on success, -EBUSY if a word write does not complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any previous COMPLETE status before starting */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3366
3367 /* offset and length are dword aligned */
3368 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3369                 u8 *buf)
3370 {
3371         int ret = 0;
3372         u32 pagesize = tp->nvram_pagesize;
3373         u32 pagemask = pagesize - 1;
3374         u32 nvram_cmd;
3375         u8 *tmp;
3376
3377         tmp = kmalloc(pagesize, GFP_KERNEL);
3378         if (tmp == NULL)
3379                 return -ENOMEM;
3380
3381         while (len) {
3382                 int j;
3383                 u32 phy_addr, page_off, size;
3384
3385                 phy_addr = offset & ~pagemask;
3386
3387                 for (j = 0; j < pagesize; j += 4) {
3388                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3389                                                   (__be32 *) (tmp + j));
3390                         if (ret)
3391                                 break;
3392                 }
3393                 if (ret)
3394                         break;
3395
3396                 page_off = offset & pagemask;
3397                 size = pagesize;
3398                 if (len < size)
3399                         size = len;
3400
3401                 len -= size;
3402
3403                 memcpy(tmp + page_off, buf, size);
3404
3405                 offset = offset + (pagesize - page_off);
3406
3407                 tg3_enable_nvram_access(tp);
3408
3409                 /*
3410                  * Before we can erase the flash page, we need
3411                  * to issue a special "write enable" command.
3412                  */
3413                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3414
3415                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3416                         break;
3417
3418                 /* Erase the target page */
3419                 tw32(NVRAM_ADDR, phy_addr);
3420
3421                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3422                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3423
3424                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425                         break;
3426
3427                 /* Issue another write enable to start the write. */
3428                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3429
3430                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3431                         break;
3432
3433                 for (j = 0; j < pagesize; j += 4) {
3434                         __be32 data;
3435
3436                         data = *((__be32 *) (tmp + j));
3437
3438                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3439
3440                         tw32(NVRAM_ADDR, phy_addr + j);
3441
3442                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3443                                 NVRAM_CMD_WR;
3444
3445                         if (j == 0)
3446                                 nvram_cmd |= NVRAM_CMD_FIRST;
3447                         else if (j == (pagesize - 4))
3448                                 nvram_cmd |= NVRAM_CMD_LAST;
3449
3450                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3451                         if (ret)
3452                                 break;
3453                 }
3454                 if (ret)
3455                         break;
3456         }
3457
3458         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3459         tg3_nvram_exec_cmd(tp, nvram_cmd);
3460
3461         kfree(tmp);
3462
3463         return ret;
3464 }
3465
/* Write @len bytes from @buf to buffered flash or EEPROM starting at
 * NVRAM offset @offset, one 32-bit word per command.  offset and
 * length are dword aligned.  Returns 0 on success or a negative errno
 * propagated from the NVRAM command engine.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the device's physical NVRAM addressing. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag page boundaries for the command engine. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* The final word of the request also terminates a burst. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Program the address register: always for EEPROM and
		 * pre-57765 parts, otherwise only at the start of a
		 * page burst.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts require an explicit write-enable
		 * command before each page write.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3520
3521 /* offset and length are dword aligned */
3522 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3523 {
3524         int ret;
3525
3526         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3527                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3528                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3529                 udelay(40);
3530         }
3531
3532         if (!tg3_flag(tp, NVRAM)) {
3533                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3534         } else {
3535                 u32 grc_mode;
3536
3537                 ret = tg3_nvram_lock(tp);
3538                 if (ret)
3539                         return ret;
3540
3541                 tg3_enable_nvram_access(tp);
3542                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3543                         tw32(NVRAM_WRITE1, 0x406);
3544
3545                 grc_mode = tr32(GRC_MODE);
3546                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3547
3548                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3549                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3550                                 buf);
3551                 } else {
3552                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3553                                 buf);
3554                 }
3555
3556                 grc_mode = tr32(GRC_MODE);
3557                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3558
3559                 tg3_disable_nvram_access(tp);
3560                 tg3_nvram_unlock(tp);
3561         }
3562
3563         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3564                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3565                 udelay(40);
3566         }
3567
3568         return ret;
3569 }
3570
3571 #define RX_CPU_SCRATCH_BASE     0x30000
3572 #define RX_CPU_SCRATCH_SIZE     0x04000
3573 #define TX_CPU_SCRATCH_BASE     0x34000
3574 #define TX_CPU_SCRATCH_SIZE     0x04000
3575
3576 /* tp->lock is held. */
3577 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3578 {
3579         int i;
3580         const int iters = 10000;
3581
3582         for (i = 0; i < iters; i++) {
3583                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3585                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3586                         break;
3587                 if (pci_channel_offline(tp->pdev))
3588                         return -EBUSY;
3589         }
3590
3591         return (i == iters) ? -EBUSY : 0;
3592 }
3593
3594 /* tp->lock is held. */
3595 static int tg3_rxcpu_pause(struct tg3 *tp)
3596 {
3597         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3598
3599         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3600         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3601         udelay(10);
3602
3603         return rc;
3604 }
3605
3606 /* tp->lock is held. */
3607 static int tg3_txcpu_pause(struct tg3 *tp)
3608 {
3609         return tg3_pause_cpu(tp, TX_CPU_BASE);
3610 }
3611
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	/* Clear any latched CPU state bits, then release the CPU from
	 * halt by clearing CPU_MODE.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3618
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	/* Take the RX CPU out of halt. */
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3624
/* Halt the on-chip CPU at @cpu_base (RX_CPU_BASE or TX_CPU_BASE).
 * Returns 0 on success, -ENODEV if the CPU refuses to halt.
 * tp->lock is held.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ parts must never be asked to halt a TX CPU here. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	/* The 5906's CPU is halted through GRC_VCPU_EXT_CTRL rather
	 * than the per-CPU mode registers.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3662
3663 static int tg3_fw_data_len(struct tg3 *tp,
3664                            const struct tg3_firmware_hdr *fw_hdr)
3665 {
3666         int fw_len;
3667
3668         /* Non fragmented firmware have one firmware header followed by a
3669          * contiguous chunk of data to be written. The length field in that
3670          * header is not the length of data to be written but the complete
3671          * length of the bss. The data length is determined based on
3672          * tp->fw->size minus headers.
3673          *
3674          * Fragmented firmware have a main header followed by multiple
3675          * fragments. Each fragment is identical to non fragmented firmware
3676          * with a firmware header followed by a contiguous chunk of data. In
3677          * the main header, the length field is unused and set to 0xffffffff.
3678          * In each fragment header the length is the entire size of that
3679          * fragment i.e. fragment data + header length. Data length is
3680          * therefore length field in the header minus TG3_FW_HDR_LEN.
3681          */
3682         if (tp->fw_len == 0xffffffff)
3683                 fw_len = be32_to_cpu(fw_hdr->len);
3684         else
3685                 fw_len = tp->fw->size;
3686
3687         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3688 }
3689
/* Download the firmware at @fw_hdr into a device CPU's scratch memory.
 *
 * For non-57766 parts the CPU is halted and its scratch area zeroed
 * before the image is written.  For the 57766 the caller has already
 * paused the CPU; the blob is fragmented (see tg3_fw_data_len()), so
 * the main header is skipped and each fragment is written to its own
 * base address.  Returns 0 on success or a negative errno.
 * tp->lock is held.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* Loading TX CPU firmware is rejected outright on 5705+. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive: direct writes on 5705+
	 * (except 57766), indirect register writes otherwise.
	 */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area and keep the CPU halted while
		 * the image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Copy each fragment's payload to its base address within the
	 * scratch region (one fragment total for non-fragmented blobs).
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3755
3756 /* tp->lock is held. */
3757 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3758 {
3759         int i;
3760         const int iters = 5;
3761
3762         tw32(cpu_base + CPU_STATE, 0xffffffff);
3763         tw32_f(cpu_base + CPU_PC, pc);
3764
3765         for (i = 0; i < iters; i++) {
3766                 if (tr32(cpu_base + CPU_PC) == pc)
3767                         break;
3768                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3769                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3770                 tw32_f(cpu_base + CPU_PC, pc);
3771                 udelay(1000);
3772         }
3773
3774         return (i == iters) ? -EBUSY : 0;
3775 }
3776
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU at the firmware's entry point.  Returns
 * 0 on success or a negative errno.  tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3818
3819 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3820 {
3821         const int iters = 1000;
3822         int i;
3823         u32 val;
3824
3825         /* Wait for boot code to complete initialization and enter service
3826          * loop. It is then safe to download service patches
3827          */
3828         for (i = 0; i < iters; i++) {
3829                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3830                         break;
3831
3832                 udelay(10);
3833         }
3834
3835         if (i == iters) {
3836                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3837                 return -EBUSY;
3838         }
3839
3840         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3841         if (val & 0xff) {
3842                 netdev_warn(tp->dev,
3843                             "Other patches exist. Not downloading EEE patch\n");
3844                 return -EEXIST;
3845         }
3846
3847         return 0;
3848 }
3849
/* Download the 57766 EEE service patch into the RX CPU, if the device
 * and boot code state allow it.  Failures are silent - the device
 * simply runs without the patch.  tp->lock is held.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only NVRAM-less configurations take this download path.
	 * NOTE(review): presumably parts with NVRAM carry the patch
	 * there - confirm before changing this check.
	 */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	/* Boot code must be idling in its service loop with no other
	 * patch already installed.
	 */
	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	/* Restart the RX CPU so the patched service loop runs. */
	tg3_rxcpu_resume(tp);
}
3890
/* Download the TSO firmware blob into the appropriate on-chip CPU and
 * start that CPU at the firmware's entry point.  No-op (returns 0)
 * when the device does not use firmware-based TSO.  Returns 0 on
 * success or a negative errno.  tp->lock is held.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 loads TSO firmware into the RX CPU using the MBUF pool
	 * SRAM region as scratch; everything else uses the dedicated
	 * TX CPU scratch area.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	/* Release the CPU so it starts executing the firmware. */
	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3940
3941 /* tp->lock is held. */
3942 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3943 {
3944         u32 addr_high, addr_low;
3945
3946         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3947         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3948                     (mac_addr[4] <<  8) | mac_addr[5]);
3949
3950         if (index < 4) {
3951                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3952                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3953         } else {
3954                 index -= 4;
3955                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3956                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3957         }
3958 }
3959
3960 /* tp->lock is held. */
3961 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3962 {
3963         u32 addr_high;
3964         int i;
3965
3966         for (i = 0; i < 4; i++) {
3967                 if (i == 1 && skip_mac_1)
3968                         continue;
3969                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3970         }
3971
3972         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3973             tg3_asic_rev(tp) == ASIC_REV_5704) {
3974                 for (i = 4; i < 16; i++)
3975                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976         }
3977
3978         addr_high = (tp->dev->dev_addr[0] +
3979                      tp->dev->dev_addr[1] +
3980                      tp->dev->dev_addr[2] +
3981                      tp->dev->dev_addr[3] +
3982                      tp->dev->dev_addr[4] +
3983                      tp->dev->dev_addr[5]) &
3984                 TX_BACKOFF_SEED_MASK;
3985         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3986 }
3987
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	/* Re-apply the driver's cached MISC_HOST_CTRL settings through
	 * PCI config space.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3997
3998 static int tg3_power_up(struct tg3 *tp)
3999 {
4000         int err;
4001
4002         tg3_enable_register_access(tp);
4003
4004         err = pci_set_power_state(tp->pdev, PCI_D0);
4005         if (!err) {
4006                 /* Switch out of Vaux if it is a NIC */
4007                 tg3_pwrsrc_switch_to_vmain(tp);
4008         } else {
4009                 netdev_err(tp->dev, "Transition to D0 failed\n");
4010         }
4011
4012         return err;
4013 }
4014
4015 static int tg3_setup_phy(struct tg3 *, bool);
4016
4017 static int tg3_power_down_prepare(struct tg3 *tp)
4018 {
4019         u32 misc_host_ctrl;
4020         bool device_should_wake, do_low_power;
4021
4022         tg3_enable_register_access(tp);
4023
4024         /* Restore the CLKREQ setting. */
4025         if (tg3_flag(tp, CLKREQ_BUG))
4026                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4027                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4028
4029         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4030         tw32(TG3PCI_MISC_HOST_CTRL,
4031              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4032
4033         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4034                              tg3_flag(tp, WOL_ENABLE);
4035
4036         if (tg3_flag(tp, USE_PHYLIB)) {
4037                 do_low_power = false;
4038                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4039                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040                         struct phy_device *phydev;
4041                         u32 phyid, advertising;
4042
4043                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4044
4045                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4046
4047                         tp->link_config.speed = phydev->speed;
4048                         tp->link_config.duplex = phydev->duplex;
4049                         tp->link_config.autoneg = phydev->autoneg;
4050                         tp->link_config.advertising = phydev->advertising;
4051
4052                         advertising = ADVERTISED_TP |
4053                                       ADVERTISED_Pause |
4054                                       ADVERTISED_Autoneg |
4055                                       ADVERTISED_10baseT_Half;
4056
4057                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4058                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4059                                         advertising |=
4060                                                 ADVERTISED_100baseT_Half |
4061                                                 ADVERTISED_100baseT_Full |
4062                                                 ADVERTISED_10baseT_Full;
4063                                 else
4064                                         advertising |= ADVERTISED_10baseT_Full;
4065                         }
4066
4067                         phydev->advertising = advertising;
4068
4069                         phy_start_aneg(phydev);
4070
4071                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4072                         if (phyid != PHY_ID_BCMAC131) {
4073                                 phyid &= PHY_BCM_OUI_MASK;
4074                                 if (phyid == PHY_BCM_OUI_1 ||
4075                                     phyid == PHY_BCM_OUI_2 ||
4076                                     phyid == PHY_BCM_OUI_3)
4077                                         do_low_power = true;
4078                         }
4079                 }
4080         } else {
4081                 do_low_power = true;
4082
4083                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4084                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4085
4086                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4087                         tg3_setup_phy(tp, false);
4088         }
4089
4090         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4091                 u32 val;
4092
4093                 val = tr32(GRC_VCPU_EXT_CTRL);
4094                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4095         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4096                 int i;
4097                 u32 val;
4098
4099                 for (i = 0; i < 200; i++) {
4100                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4101                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4102                                 break;
4103                         msleep(1);
4104                 }
4105         }
4106         if (tg3_flag(tp, WOL_CAP))
4107                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4108                                                      WOL_DRV_STATE_SHUTDOWN |
4109                                                      WOL_DRV_WOL |
4110                                                      WOL_SET_MAGIC_PKT);
4111
4112         if (device_should_wake) {
4113                 u32 mac_mode;
4114
4115                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4116                         if (do_low_power &&
4117                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4118                                 tg3_phy_auxctl_write(tp,
4119                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4120                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4121                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4122                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4123                                 udelay(40);
4124                         }
4125
4126                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4127                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4128                         else if (tp->phy_flags &
4129                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4130                                 if (tp->link_config.active_speed == SPEED_1000)
4131                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4132                                 else
4133                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4134                         } else
4135                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4136
4137                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4138                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4139                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4140                                              SPEED_100 : SPEED_10;
4141                                 if (tg3_5700_link_polarity(tp, speed))
4142                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4143                                 else
4144                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4145                         }
4146                 } else {
4147                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4148                 }
4149
4150                 if (!tg3_flag(tp, 5750_PLUS))
4151                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4152
4153                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4154                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4155                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4156                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4157
4158                 if (tg3_flag(tp, ENABLE_APE))
4159                         mac_mode |= MAC_MODE_APE_TX_EN |
4160                                     MAC_MODE_APE_RX_EN |
4161                                     MAC_MODE_TDE_ENABLE;
4162
4163                 tw32_f(MAC_MODE, mac_mode);
4164                 udelay(100);
4165
4166                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4167                 udelay(10);
4168         }
4169
4170         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4171             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4172              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4173                 u32 base_val;
4174
4175                 base_val = tp->pci_clock_ctrl;
4176                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4177                              CLOCK_CTRL_TXCLK_DISABLE);
4178
4179                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4180                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4181         } else if (tg3_flag(tp, 5780_CLASS) ||
4182                    tg3_flag(tp, CPMU_PRESENT) ||
4183                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4184                 /* do nothing */
4185         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4186                 u32 newbits1, newbits2;
4187
4188                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4190                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4191                                     CLOCK_CTRL_TXCLK_DISABLE |
4192                                     CLOCK_CTRL_ALTCLK);
4193                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4194                 } else if (tg3_flag(tp, 5705_PLUS)) {
4195                         newbits1 = CLOCK_CTRL_625_CORE;
4196                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4197                 } else {
4198                         newbits1 = CLOCK_CTRL_ALTCLK;
4199                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4200                 }
4201
4202                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4203                             40);
4204
4205                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4206                             40);
4207
4208                 if (!tg3_flag(tp, 5705_PLUS)) {
4209                         u32 newbits3;
4210
4211                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4212                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4213                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4214                                             CLOCK_CTRL_TXCLK_DISABLE |
4215                                             CLOCK_CTRL_44MHZ_CORE);
4216                         } else {
4217                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4218                         }
4219
4220                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4221                                     tp->pci_clock_ctrl | newbits3, 40);
4222                 }
4223         }
4224
4225         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4226                 tg3_power_down_phy(tp, do_low_power);
4227
4228         tg3_frob_aux_power(tp, true);
4229
4230         /* Workaround for unstable PLL clock */
4231         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4232             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4233              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4234                 u32 val = tr32(0x7d00);
4235
4236                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4237                 tw32(0x7d00, val);
4238                 if (!tg3_flag(tp, ENABLE_ASF)) {
4239                         int err;
4240
4241                         err = tg3_nvram_lock(tp);
4242                         tg3_halt_cpu(tp, RX_CPU_BASE);
4243                         if (!err)
4244                                 tg3_nvram_unlock(tp);
4245                 }
4246         }
4247
4248         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4249
4250         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4251
4252         return 0;
4253 }
4254
/* Put the device into its low-power state: arm (or disarm) PCI
 * wake-from-D3 according to the WOL_ENABLE flag, then drop to D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4260
4261 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4262 {
4263         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4264         case MII_TG3_AUX_STAT_10HALF:
4265                 *speed = SPEED_10;
4266                 *duplex = DUPLEX_HALF;
4267                 break;
4268
4269         case MII_TG3_AUX_STAT_10FULL:
4270                 *speed = SPEED_10;
4271                 *duplex = DUPLEX_FULL;
4272                 break;
4273
4274         case MII_TG3_AUX_STAT_100HALF:
4275                 *speed = SPEED_100;
4276                 *duplex = DUPLEX_HALF;
4277                 break;
4278
4279         case MII_TG3_AUX_STAT_100FULL:
4280                 *speed = SPEED_100;
4281                 *duplex = DUPLEX_FULL;
4282                 break;
4283
4284         case MII_TG3_AUX_STAT_1000HALF:
4285                 *speed = SPEED_1000;
4286                 *duplex = DUPLEX_HALF;
4287                 break;
4288
4289         case MII_TG3_AUX_STAT_1000FULL:
4290                 *speed = SPEED_1000;
4291                 *duplex = DUPLEX_FULL;
4292                 break;
4293
4294         default:
4295                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4296                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4297                                  SPEED_10;
4298                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4299                                   DUPLEX_HALF;
4300                         break;
4301                 }
4302                 *speed = SPEED_UNKNOWN;
4303                 *duplex = DUPLEX_UNKNOWN;
4304                 break;
4305         }
4306 }
4307
4308 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4309 {
4310         int err = 0;
4311         u32 val, new_adv;
4312
4313         new_adv = ADVERTISE_CSMA;
4314         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4315         new_adv |= mii_advertise_flowctrl(flowctrl);
4316
4317         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4318         if (err)
4319                 goto done;
4320
4321         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4322                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4323
4324                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4325                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4326                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4327
4328                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4329                 if (err)
4330                         goto done;
4331         }
4332
4333         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4334                 goto done;
4335
4336         tw32(TG3_CPMU_EEE_MODE,
4337              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4338
4339         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4340         if (!err) {
4341                 u32 err2;
4342
4343                 val = 0;
4344                 /* Advertise 100-BaseTX EEE ability */
4345                 if (advertise & ADVERTISED_100baseT_Full)
4346                         val |= MDIO_AN_EEE_ADV_100TX;
4347                 /* Advertise 1000-BaseT EEE ability */
4348                 if (advertise & ADVERTISED_1000baseT_Full)
4349                         val |= MDIO_AN_EEE_ADV_1000T;
4350
4351                 if (!tp->eee.eee_enabled) {
4352                         val = 0;
4353                         tp->eee.advertised = 0;
4354                 } else {
4355                         tp->eee.advertised = advertise &
4356                                              (ADVERTISED_100baseT_Full |
4357                                               ADVERTISED_1000baseT_Full);
4358                 }
4359
4360                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4361                 if (err)
4362                         val = 0;
4363
4364                 switch (tg3_asic_rev(tp)) {
4365                 case ASIC_REV_5717:
4366                 case ASIC_REV_57765:
4367                 case ASIC_REV_57766:
4368                 case ASIC_REV_5719:
4369                         /* If we advertised any eee advertisements above... */
4370                         if (val)
4371                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4372                                       MII_TG3_DSP_TAP26_RMRXSTO |
4373                                       MII_TG3_DSP_TAP26_OPCSINPT;
4374                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4375                         /* Fall through */
4376                 case ASIC_REV_5720:
4377                 case ASIC_REV_5762:
4378                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4379                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4380                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4381                 }
4382
4383                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4384                 if (!err)
4385                         err = err2;
4386         }
4387
4388 done:
4389         return err;
4390 }
4391
/* Kick off link bring-up on a copper PHY.
 *
 * With autoneg enabled (or while in the low-power/WoL state) this
 * programs the advertisement registers via tg3_phy_autoneg_cfg() and
 * restarts autonegotiation.  With autoneg disabled it forces BMCR to
 * the configured speed/duplex, first bouncing the PHY through loopback
 * until the stale link drops so the forced mode takes cleanly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down: advertise only the modes usable
			 * while on auxiliary power.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced-mode BMCR value (SPEED_10 == no bits) */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to ~15 ms)
			 * for it to actually go down before writing the new
			 * forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; link status is latched,
				 * so the second read reflects current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4488
/* Reconstruct tp->link_config from the settings currently programmed in
 * the PHY, so the driver can adopt an existing link instead of
 * disturbing it.
 *
 * Returns 0 on success; a negative error when a PHY read fails or the
 * forced-mode settings found are unusable for this PHY type (-EIO).
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* PHY is in forced speed/duplex mode */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			/* serdes PHYs cannot run forced 10 Mbps */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			/* serdes PHYs cannot run forced 100 Mbps */
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autonegotiation is enabled: pull the advertised link modes */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit modes live in MII_CTRL1000 */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X modes live in MII_ADVERTISE */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4585
/* Apply BCM5401 PHY DSP fixups.  The individual write errors are OR-ed
 * together, so the return value is 0 only when every write succeeded.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4604
4605 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4606 {
4607         struct ethtool_eee eee;
4608
4609         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4610                 return true;
4611
4612         tg3_eee_pull_config(tp, &eee);
4613
4614         if (tp->eee.eee_enabled) {
4615                 if (tp->eee.advertised != eee.advertised ||
4616                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4617                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4618                         return false;
4619         } else {
4620                 /* EEE is disabled but we're advertising */
4621                 if (eee.advertised)
4622                         return false;
4623         }
4624
4625         return true;
4626 }
4627
/* Check that the PHY's advertisement registers still match what the
 * driver intends to advertise.  On success *lcladv holds the raw
 * MII_ADVERTISE value for later flow-control resolution.  Returns
 * false on any PHY read failure or mismatch.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits are only compared on full-duplex links */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 are forced to master mode when the
			 * advertisement is programmed; expect those bits too.
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4671
4672 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4673 {
4674         u32 lpeth = 0;
4675
4676         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4677                 u32 val;
4678
4679                 if (tg3_readphy(tp, MII_STAT1000, &val))
4680                         return false;
4681
4682                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4683         }
4684
4685         if (tg3_readphy(tp, MII_LPA, rmtadv))
4686                 return false;
4687
4688         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4689         tp->link_config.rmt_adv = lpeth;
4690
4691         return true;
4692 }
4693
4694 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4695 {
4696         if (curr_link_up != tp->link_up) {
4697                 if (curr_link_up) {
4698                         netif_carrier_on(tp->dev);
4699                 } else {
4700                         netif_carrier_off(tp->dev);
4701                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4702                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4703                 }
4704
4705                 tg3_link_report(tp);
4706                 return true;
4707         }
4708
4709         return false;
4710 }
4711
/* Quiesce MAC event generation and clear the link-related status
 * change bits before probing the PHY for a link change.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4723
/* Program the CPMU Energy Efficient Ethernet registers from the cached
 * tp->eee settings: link-idle detection sources, LPI exit timing, the
 * EEE mode word (written as 0 when EEE is disabled) and the two
 * debounce timers.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally monitors APE TX activity for link idle */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	/* Build the EEE mode word; LPI_IN_TX follows the user setting */
	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Write 0 to fully disable EEE when it is not enabled */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4759
4760 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4761 {
4762         bool current_link_up;
4763         u32 bmsr, val;
4764         u32 lcl_adv, rmt_adv;
4765         u16 current_speed;
4766         u8 current_duplex;
4767         int i, err;
4768
4769         tg3_clear_mac_status(tp);
4770
4771         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4772                 tw32_f(MAC_MI_MODE,
4773                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4774                 udelay(80);
4775         }
4776
4777         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4778
4779         /* Some third-party PHYs need to be reset on link going
4780          * down.
4781          */
4782         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4783              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4784              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4785             tp->link_up) {
4786                 tg3_readphy(tp, MII_BMSR, &bmsr);
4787                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788                     !(bmsr & BMSR_LSTATUS))
4789                         force_reset = true;
4790         }
4791         if (force_reset)
4792                 tg3_phy_reset(tp);
4793
4794         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4795                 tg3_readphy(tp, MII_BMSR, &bmsr);
4796                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4797                     !tg3_flag(tp, INIT_COMPLETE))
4798                         bmsr = 0;
4799
4800                 if (!(bmsr & BMSR_LSTATUS)) {
4801                         err = tg3_init_5401phy_dsp(tp);
4802                         if (err)
4803                                 return err;
4804
4805                         tg3_readphy(tp, MII_BMSR, &bmsr);
4806                         for (i = 0; i < 1000; i++) {
4807                                 udelay(10);
4808                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4809                                     (bmsr & BMSR_LSTATUS)) {
4810                                         udelay(40);
4811                                         break;
4812                                 }
4813                         }
4814
4815                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4816                             TG3_PHY_REV_BCM5401_B0 &&
4817                             !(bmsr & BMSR_LSTATUS) &&
4818                             tp->link_config.active_speed == SPEED_1000) {
4819                                 err = tg3_phy_reset(tp);
4820                                 if (!err)
4821                                         err = tg3_init_5401phy_dsp(tp);
4822                                 if (err)
4823                                         return err;
4824                         }
4825                 }
4826         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4827                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4828                 /* 5701 {A0,B0} CRC bug workaround */
4829                 tg3_writephy(tp, 0x15, 0x0a75);
4830                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4831                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4832                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4833         }
4834
4835         /* Clear pending interrupts... */
4836         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4837         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838
4839         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4840                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4841         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4842                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4843
4844         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4845             tg3_asic_rev(tp) == ASIC_REV_5701) {
4846                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4847                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4848                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4849                 else
4850                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4851         }
4852
4853         current_link_up = false;
4854         current_speed = SPEED_UNKNOWN;
4855         current_duplex = DUPLEX_UNKNOWN;
4856         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4857         tp->link_config.rmt_adv = 0;
4858
4859         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4860                 err = tg3_phy_auxctl_read(tp,
4861                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4862                                           &val);
4863                 if (!err && !(val & (1 << 10))) {
4864                         tg3_phy_auxctl_write(tp,
4865                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4866                                              val | (1 << 10));
4867                         goto relink;
4868                 }
4869         }
4870
4871         bmsr = 0;
4872         for (i = 0; i < 100; i++) {
4873                 tg3_readphy(tp, MII_BMSR, &bmsr);
4874                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4875                     (bmsr & BMSR_LSTATUS))
4876                         break;
4877                 udelay(40);
4878         }
4879
4880         if (bmsr & BMSR_LSTATUS) {
4881                 u32 aux_stat, bmcr;
4882
4883                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4884                 for (i = 0; i < 2000; i++) {
4885                         udelay(10);
4886                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4887                             aux_stat)
4888                                 break;
4889                 }
4890
4891                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4892                                              &current_speed,
4893                                              &current_duplex);
4894
4895                 bmcr = 0;
4896                 for (i = 0; i < 200; i++) {
4897                         tg3_readphy(tp, MII_BMCR, &bmcr);
4898                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4899                                 continue;
4900                         if (bmcr && bmcr != 0x7fff)
4901                                 break;
4902                         udelay(10);
4903                 }
4904
4905                 lcl_adv = 0;
4906                 rmt_adv = 0;
4907
4908                 tp->link_config.active_speed = current_speed;
4909                 tp->link_config.active_duplex = current_duplex;
4910
4911                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4912                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4913
4914                         if ((bmcr & BMCR_ANENABLE) &&
4915                             eee_config_ok &&
4916                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4917                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4918                                 current_link_up = true;
4919
4920                         /* EEE settings changes take effect only after a phy
4921                          * reset.  If we have skipped a reset due to Link Flap
4922                          * Avoidance being enabled, do it now.
4923                          */
4924                         if (!eee_config_ok &&
4925                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4926                             !force_reset) {
4927                                 tg3_setup_eee(tp);
4928                                 tg3_phy_reset(tp);
4929                         }
4930                 } else {
4931                         if (!(bmcr & BMCR_ANENABLE) &&
4932                             tp->link_config.speed == current_speed &&
4933                             tp->link_config.duplex == current_duplex) {
4934                                 current_link_up = true;
4935                         }
4936                 }
4937
4938                 if (current_link_up &&
4939                     tp->link_config.active_duplex == DUPLEX_FULL) {
4940                         u32 reg, bit;
4941
4942                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4943                                 reg = MII_TG3_FET_GEN_STAT;
4944                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4945                         } else {
4946                                 reg = MII_TG3_EXT_STAT;
4947                                 bit = MII_TG3_EXT_STAT_MDIX;
4948                         }
4949
4950                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4951                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4952
4953                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4954                 }
4955         }
4956
4957 relink:
4958         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4959                 tg3_phy_copper_begin(tp);
4960
4961                 if (tg3_flag(tp, ROBOSWITCH)) {
4962                         current_link_up = true;
4963                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4964                         current_speed = SPEED_1000;
4965                         current_duplex = DUPLEX_FULL;
4966                         tp->link_config.active_speed = current_speed;
4967                         tp->link_config.active_duplex = current_duplex;
4968                 }
4969
4970                 tg3_readphy(tp, MII_BMSR, &bmsr);
4971                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4972                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4973                         current_link_up = true;
4974         }
4975
4976         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4977         if (current_link_up) {
4978                 if (tp->link_config.active_speed == SPEED_100 ||
4979                     tp->link_config.active_speed == SPEED_10)
4980                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4981                 else
4982                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4983         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4984                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4985         else
4986                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4987
4988         /* In order for the 5750 core in BCM4785 chip to work properly
4989          * in RGMII mode, the Led Control Register must be set up.
4990          */
4991         if (tg3_flag(tp, RGMII_MODE)) {
4992                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4993                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4994
4995                 if (tp->link_config.active_speed == SPEED_10)
4996                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4997                 else if (tp->link_config.active_speed == SPEED_100)
4998                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4999                                      LED_CTRL_100MBPS_ON);
5000                 else if (tp->link_config.active_speed == SPEED_1000)
5001                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002                                      LED_CTRL_1000MBPS_ON);
5003
5004                 tw32(MAC_LED_CTRL, led_ctrl);
5005                 udelay(40);
5006         }
5007
5008         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5009         if (tp->link_config.active_duplex == DUPLEX_HALF)
5010                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5011
5012         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5013                 if (current_link_up &&
5014                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5015                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5016                 else
5017                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5018         }
5019
5020         /* ??? Without this setting Netgear GA302T PHY does not
5021          * ??? send/receive packets...
5022          */
5023         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5024             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5025                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5026                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5027                 udelay(80);
5028         }
5029
5030         tw32_f(MAC_MODE, tp->mac_mode);
5031         udelay(40);
5032
5033         tg3_phy_eee_adjust(tp, current_link_up);
5034
5035         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5036                 /* Polled via timer. */
5037                 tw32_f(MAC_EVENT, 0);
5038         } else {
5039                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5040         }
5041         udelay(40);
5042
5043         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5044             current_link_up &&
5045             tp->link_config.active_speed == SPEED_1000 &&
5046             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5047                 udelay(120);
5048                 tw32_f(MAC_STATUS,
5049                      (MAC_STATUS_SYNC_CHANGED |
5050                       MAC_STATUS_CFG_CHANGED));
5051                 udelay(40);
5052                 tg3_write_mem(tp,
5053                               NIC_SRAM_FIRMWARE_MBOX,
5054                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5055         }
5056
5057         /* Prevent send BD corruption. */
5058         if (tg3_flag(tp, CLKREQ_BUG)) {
5059                 if (tp->link_config.active_speed == SPEED_100 ||
5060                     tp->link_config.active_speed == SPEED_10)
5061                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5062                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5063                 else
5064                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5065                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5066         }
5067
5068         tg3_test_and_report_link_chg(tp, current_link_up);
5069
5070         return 0;
5071 }
5072
/* Software autonegotiation state for 1000BASE-X fiber links, driven one
 * tick at a time by tg3_fiber_aneg_smachine().  The state and flag
 * encodings mirror the IEEE 802.3 clause 37 arbitration state diagram.
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* control and result bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
/* MR_LP_ADV_* report what the link partner advertised in its config word */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters maintained by the state machine itself (cur_time
	 * is incremented once per smachine call; link_time snapshots it).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive identical rx config words */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine() */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks to dwell in the RESTART / COMPLETE_ACK / IDLE_DETECT states */
#define ANEG_STATE_SETTLE_TIME	10000
5136
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  First samples the received config word from the MAC and updates
 * the ability/ack/idle match detectors, then executes one transition of
 * the clause-37-style state diagram held in @ap.
 *
 * Returns ANEG_OK (keep going), ANEG_DONE (negotiation finished),
 * ANEG_TIMER_ENAB (caller should continue ticking on a timer), or
 * ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First tick: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed; restart the stability count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word seen twice in a row => ability match. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: treat the link as idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Re-arm all detectors and start a fresh round. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word while we settle. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait for a stable, non-zero config word from the partner. */
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Ack seen: accept only if the word (ignoring the ack
			 * bit) still matches what we ability-matched on.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: restart negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			/* Reserved bits set in the config word: give up. */
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertisement into MR_LP_ADV_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008 is the toggle bit position in the received word;
		 * no ANEG_CFG_* name exists for it in this driver.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * succeed only when neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5388
5389 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5390 {
5391         int res = 0;
5392         struct tg3_fiber_aneginfo aninfo;
5393         int status = ANEG_FAILED;
5394         unsigned int tick;
5395         u32 tmp;
5396
5397         tw32_f(MAC_TX_AUTO_NEG, 0);
5398
5399         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5400         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5401         udelay(40);
5402
5403         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5404         udelay(40);
5405
5406         memset(&aninfo, 0, sizeof(aninfo));
5407         aninfo.flags |= MR_AN_ENABLE;
5408         aninfo.state = ANEG_STATE_UNKNOWN;
5409         aninfo.cur_time = 0;
5410         tick = 0;
5411         while (++tick < 195000) {
5412                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5413                 if (status == ANEG_DONE || status == ANEG_FAILED)
5414                         break;
5415
5416                 udelay(1);
5417         }
5418
5419         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5420         tw32_f(MAC_MODE, tp->mac_mode);
5421         udelay(40);
5422
5423         *txflags = aninfo.txconfig;
5424         *rxflags = aninfo.flags;
5425
5426         if (status == ANEG_DONE &&
5427             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5428                              MR_LP_ADV_FULL_DUPLEX)))
5429                 res = 1;
5430
5431         return res;
5432 }
5433
/* Initialize the BCM8002 SERDES PHY: software reset followed by a fixed
 * sequence of vendor register writes (PLL lock range, channel select,
 * auto-lock/comdet, POR toggle).  The register numbers and values are
 * Broadcom magic; do not reorder the writes or the delays between them.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5483
/* Link setup for fiber devices whose autonegotiation is handled by the
 * SG_DIG hardware block.  Programs SG_DIG_CTRL for the requested mode,
 * then inspects SG_DIG_STATUS / MAC_STATUS to decide whether the link is
 * up, falling back to parallel detection when the partner does not send
 * config words.
 *
 * Returns true if the link should be considered up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* The MAC_SERDES_CFG workaround applies to everything except
	 * 5704 revisions A0 and A1.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on.
		 * The 0xc010000 / 0x4010000 values are per-port Broadcom
		 * magic for MAC_SERDES_CFG.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* A link established by parallel detection stays up for a
		 * while (while serdes_counter runs down) before autoneg is
		 * restarted, as long as we stay PCS-synced with no config
		 * words arriving.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset, then start hardware autoneg. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive pause settings from our
			 * advertisement and the partner's reply.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: drop to parallel
				 * detection by disabling hardware autoneg.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: just re-arm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5628
5629 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5630 {
5631         bool current_link_up = false;
5632
5633         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5634                 goto out;
5635
5636         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5637                 u32 txflags, rxflags;
5638                 int i;
5639
5640                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5641                         u32 local_adv = 0, remote_adv = 0;
5642
5643                         if (txflags & ANEG_CFG_PS1)
5644                                 local_adv |= ADVERTISE_1000XPAUSE;
5645                         if (txflags & ANEG_CFG_PS2)
5646                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5647
5648                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5649                                 remote_adv |= LPA_1000XPAUSE;
5650                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5651                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5652
5653                         tp->link_config.rmt_adv =
5654                                            mii_adv_to_ethtool_adv_x(remote_adv);
5655
5656                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5657
5658                         current_link_up = true;
5659                 }
5660                 for (i = 0; i < 30; i++) {
5661                         udelay(20);
5662                         tw32_f(MAC_STATUS,
5663                                (MAC_STATUS_SYNC_CHANGED |
5664                                 MAC_STATUS_CFG_CHANGED));
5665                         udelay(40);
5666                         if ((tr32(MAC_STATUS) &
5667                              (MAC_STATUS_SYNC_CHANGED |
5668                               MAC_STATUS_CFG_CHANGED)) == 0)
5669                                 break;
5670                 }
5671
5672                 mac_status = tr32(MAC_STATUS);
5673                 if (!current_link_up &&
5674                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5675                     !(mac_status & MAC_STATUS_RCVD_CFG))
5676                         current_link_up = true;
5677         } else {
5678                 tg3_setup_flow_control(tp, 0, 0);
5679
5680                 /* Forcing 1000FD link up. */
5681                 current_link_up = true;
5682
5683                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5684                 udelay(40);
5685
5686                 tw32_f(MAC_MODE, tp->mac_mode);
5687                 udelay(40);
5688         }
5689
5690 out:
5691         return current_link_up;
5692 }
5693
/* Configure the link on a TBI-mode fiber PHY and report any resulting
 * link change.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot current link parameters so we can tell at the end
	 * whether anything actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: with software autoneg, if the link is already up
	 * with PCS synced and signal detected and no config change is
	 * pending, just ack the sticky change bits and leave the link
	 * untouched.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block so a stale
	 * event is not processed later.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the sticky MAC status change bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS to prod the link partner's
			 * autonegotiation.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		/* TBI fiber links in this driver are always 1000/full. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even when the up/down state is unchanged, report if speed,
	 * duplex or flow control settings differ from before.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5796
5797 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5798 {
5799         int err = 0;
5800         u32 bmsr, bmcr;
5801         u16 current_speed = SPEED_UNKNOWN;
5802         u8 current_duplex = DUPLEX_UNKNOWN;
5803         bool current_link_up = false;
5804         u32 local_adv, remote_adv, sgsr;
5805
5806         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5807              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5808              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5809              (sgsr & SERDES_TG3_SGMII_MODE)) {
5810
5811                 if (force_reset)
5812                         tg3_phy_reset(tp);
5813
5814                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5815
5816                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5817                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5818                 } else {
5819                         current_link_up = true;
5820                         if (sgsr & SERDES_TG3_SPEED_1000) {
5821                                 current_speed = SPEED_1000;
5822                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5823                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5824                                 current_speed = SPEED_100;
5825                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5826                         } else {
5827                                 current_speed = SPEED_10;
5828                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5829                         }
5830
5831                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5832                                 current_duplex = DUPLEX_FULL;
5833                         else
5834                                 current_duplex = DUPLEX_HALF;
5835                 }
5836
5837                 tw32_f(MAC_MODE, tp->mac_mode);
5838                 udelay(40);
5839
5840                 tg3_clear_mac_status(tp);
5841
5842                 goto fiber_setup_done;
5843         }
5844
5845         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5846         tw32_f(MAC_MODE, tp->mac_mode);
5847         udelay(40);
5848
5849         tg3_clear_mac_status(tp);
5850
5851         if (force_reset)
5852                 tg3_phy_reset(tp);
5853
5854         tp->link_config.rmt_adv = 0;
5855
5856         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5857         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5859                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5860                         bmsr |= BMSR_LSTATUS;
5861                 else
5862                         bmsr &= ~BMSR_LSTATUS;
5863         }
5864
5865         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5866
5867         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5868             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5869                 /* do nothing, just check for link up at the end */
5870         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5871                 u32 adv, newadv;
5872
5873                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5874                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5875                                  ADVERTISE_1000XPAUSE |
5876                                  ADVERTISE_1000XPSE_ASYM |
5877                                  ADVERTISE_SLCT);
5878
5879                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5880                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5881
5882                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5883                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5884                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5885                         tg3_writephy(tp, MII_BMCR, bmcr);
5886
5887                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5888                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5889                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5890
5891                         return err;
5892                 }
5893         } else {
5894                 u32 new_bmcr;
5895
5896                 bmcr &= ~BMCR_SPEED1000;
5897                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5898
5899                 if (tp->link_config.duplex == DUPLEX_FULL)
5900                         new_bmcr |= BMCR_FULLDPLX;
5901
5902                 if (new_bmcr != bmcr) {
5903                         /* BMCR_SPEED1000 is a reserved bit that needs
5904                          * to be set on write.
5905                          */
5906                         new_bmcr |= BMCR_SPEED1000;
5907
5908                         /* Force a linkdown */
5909                         if (tp->link_up) {
5910                                 u32 adv;
5911
5912                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5913                                 adv &= ~(ADVERTISE_1000XFULL |
5914                                          ADVERTISE_1000XHALF |
5915                                          ADVERTISE_SLCT);
5916                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5917                                 tg3_writephy(tp, MII_BMCR, bmcr |
5918                                                            BMCR_ANRESTART |
5919                                                            BMCR_ANENABLE);
5920                                 udelay(10);
5921                                 tg3_carrier_off(tp);
5922                         }
5923                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5924                         bmcr = new_bmcr;
5925                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5928                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5929                                         bmsr |= BMSR_LSTATUS;
5930                                 else
5931                                         bmsr &= ~BMSR_LSTATUS;
5932                         }
5933                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5934                 }
5935         }
5936
5937         if (bmsr & BMSR_LSTATUS) {
5938                 current_speed = SPEED_1000;
5939                 current_link_up = true;
5940                 if (bmcr & BMCR_FULLDPLX)
5941                         current_duplex = DUPLEX_FULL;
5942                 else
5943                         current_duplex = DUPLEX_HALF;
5944
5945                 local_adv = 0;
5946                 remote_adv = 0;
5947
5948                 if (bmcr & BMCR_ANENABLE) {
5949                         u32 common;
5950
5951                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5952                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5953                         common = local_adv & remote_adv;
5954                         if (common & (ADVERTISE_1000XHALF |
5955                                       ADVERTISE_1000XFULL)) {
5956                                 if (common & ADVERTISE_1000XFULL)
5957                                         current_duplex = DUPLEX_FULL;
5958                                 else
5959                                         current_duplex = DUPLEX_HALF;
5960
5961                                 tp->link_config.rmt_adv =
5962                                            mii_adv_to_ethtool_adv_x(remote_adv);
5963                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5964                                 /* Link is up via parallel detect */
5965                         } else {
5966                                 current_link_up = false;
5967                         }
5968                 }
5969         }
5970
5971 fiber_setup_done:
5972         if (current_link_up && current_duplex == DUPLEX_FULL)
5973                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5974
5975         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5976         if (tp->link_config.active_duplex == DUPLEX_HALF)
5977                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5978
5979         tw32_f(MAC_MODE, tp->mac_mode);
5980         udelay(40);
5981
5982         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5983
5984         tp->link_config.active_speed = current_speed;
5985         tp->link_config.active_duplex = current_duplex;
5986
5987         tg3_test_and_report_link_chg(tp, current_link_up);
5988         return err;
5989 }
5990
/* Poll-time handling of serdes parallel detection.  When autoneg is
 * enabled but the link has not come up, force the link up if the PHY
 * reports signal detect without incoming config code words; if config
 * code words later arrive on a parallel-detected link, re-enable
 * autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read - NOTE(review): presumably the first
			 * read clears latched state; confirm against the
			 * PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6050
/* Top-level PHY/link setup: dispatch to the serdes or copper handler,
 * then update MAC timing, statistics coalescing and power-management
 * registers that depend on the resulting link state.  Returns the
 * handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the current MAC
		 * clock frequency reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762: preserve the jumbo frame length and countdown
	 * fields already programmed in MAC_TX_LENGTHS.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Use a longer slot time (0xff vs 32) on half-duplex gigabit. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Program the tuned L1 entry threshold while the link
		 * is down; saturate the threshold field while it is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6116
6117 /* tp->lock must be held */
6118 static u64 tg3_refclk_read(struct tg3 *tp)
6119 {
6120         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6121         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6122 }
6123
6124 /* tp->lock must be held */
6125 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6126 {
6127         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6128
6129         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6130         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6131         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6132         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6133 }
6134
6135 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6136 static inline void tg3_full_unlock(struct tg3 *tp);
6137 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6138 {
6139         struct tg3 *tp = netdev_priv(dev);
6140
6141         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6142                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6143                                 SOF_TIMESTAMPING_SOFTWARE;
6144
6145         if (tg3_flag(tp, PTP_CAPABLE)) {
6146                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6147                                         SOF_TIMESTAMPING_RX_HARDWARE |
6148                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6149         }
6150
6151         if (tp->ptp_clock)
6152                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6153         else
6154                 info->phc_index = -1;
6155
6156         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6157
6158         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6159                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6160                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6161                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6162         return 0;
6163 }
6164
6165 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6166 {
6167         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6168         bool neg_adj = false;
6169         u32 correction = 0;
6170
6171         if (ppb < 0) {
6172                 neg_adj = true;
6173                 ppb = -ppb;
6174         }
6175
6176         /* Frequency adjustment is performed using hardware with a 24 bit
6177          * accumulator and a programmable correction value. On each clk, the
6178          * correction value gets added to the accumulator and when it
6179          * overflows, the time counter is incremented/decremented.
6180          *
6181          * So conversion from ppb to correction value is
6182          *              ppb * (1 << 24) / 1000000000
6183          */
6184         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6185                      TG3_EAV_REF_CLK_CORRECT_MASK;
6186
6187         tg3_full_lock(tp, 0);
6188
6189         if (correction)
6190                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6191                      TG3_EAV_REF_CLK_CORRECT_EN |
6192                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6193         else
6194                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6195
6196         tg3_full_unlock(tp);
6197
6198         return 0;
6199 }
6200
6201 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6202 {
6203         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6204
6205         tg3_full_lock(tp, 0);
6206         tp->ptp_adjust += delta;
6207         tg3_full_unlock(tp);
6208
6209         return 0;
6210 }
6211
6212 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6213 {
6214         u64 ns;
6215         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6216
6217         tg3_full_lock(tp, 0);
6218         ns = tg3_refclk_read(tp);
6219         ns += tp->ptp_adjust;
6220         tg3_full_unlock(tp);
6221
6222         *ts = ns_to_timespec64(ns);
6223
6224         return 0;
6225 }
6226
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228                            const struct timespec64 *ts)
6229 {
6230         u64 ns;
6231         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232
6233         ns = timespec64_to_ns(ts);
6234
6235         tg3_full_lock(tp, 0);
6236         tg3_refclk_write(tp, ns);
6237         tp->ptp_adjust = 0;
6238         tg3_full_unlock(tp);
6239
6240         return 0;
6241 }
6242
/* Enable/disable an ancillary PTP pin function.  Only a single
 * one-shot periodic output (PTP_CLK_REQ_PEROUT index 0), implemented
 * via EAV watchdog 0, is supported; everything else is -EOPNOTSUPP.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Only one timesync output exists. */
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			/* Only one-shot output: a nonzero period cannot
			 * be honored.
			 */
			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* The watchdog start value is limited to 63 bits. */
			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* Arm watchdog 0 with the start time and route
			 * the timesync output to it.
			 */
			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			/* Disarm the watchdog and clear the routing. */
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
6301
/* PTP clock capabilities registered with the PTP core.  The single
 * periodic output (n_per_out = 1) is implemented by tg3_ptp_enable()
 * as a one-shot timesync output.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime64	= tg3_ptp_gettime,
	.settime64	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6317
6318 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6319                                      struct skb_shared_hwtstamps *timestamp)
6320 {
6321         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6322         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6323                                            tp->ptp_adjust);
6324 }
6325
6326 /* tp->lock must be held */
6327 static void tg3_ptp_init(struct tg3 *tp)
6328 {
6329         if (!tg3_flag(tp, PTP_CAPABLE))
6330                 return;
6331
6332         /* Initialize the hardware clock to the system time. */
6333         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6334         tp->ptp_adjust = 0;
6335         tp->ptp_info = tg3_ptp_caps;
6336 }
6337
6338 /* tp->lock must be held */
6339 static void tg3_ptp_resume(struct tg3 *tp)
6340 {
6341         if (!tg3_flag(tp, PTP_CAPABLE))
6342                 return;
6343
6344         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6345         tp->ptp_adjust = 0;
6346 }
6347
6348 static void tg3_ptp_fini(struct tg3 *tp)
6349 {
6350         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6351                 return;
6352
6353         ptp_clock_unregister(tp->ptp_clock);
6354         tp->ptp_clock = NULL;
6355         tp->ptp_adjust = 0;
6356 }
6357
/* Return the irq_sync flag - nonzero while interrupts are being
 * quiesced.  NOTE(review): the exact set/clear points are elsewhere in
 * this file; confirm against tg3_full_lock()/irq handler usage.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6362
6363 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6364 {
6365         int i;
6366
6367         dst = (u32 *)((u8 *)dst + off);
6368         for (i = 0; i < len; i += sizeof(u32))
6369                 *dst++ = tr32(off + i);
6370 }
6371
/* Snapshot the legacy (non-PCI-Express) register blocks into 'regs'
 * for tg3_dump_state().  Each tg3_rd32_loop() copies one block at its
 * native offset within the buffer; blocks that do not exist on a
 * given chip are guarded by feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* TX CPU registers are not present on 5705-plus chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6421
/* Dump the device register space plus per-vector status block and
 * NAPI state to the kernel log.  Uses GFP_ATOMIC (callable from error
 * paths) and bails out silently if the scratch buffer cannot be
 * allocated.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6477
6478 /* This is called whenever we suspect that the system chipset is re-
6479  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6480  * is bogus tx completions. We try to recover by setting the
6481  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6482  * in the workqueue.
6483  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* This path only makes sense while mailbox writes are still direct
	 * and the reorder workaround has not yet been applied; anything
	 * else indicates driver-internal inconsistency.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The reset workqueue notices this flag and reinitializes the
	 * chip with the write-reorder workaround enabled.
	 */
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
6497
6498 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6499 {
6500         /* Tell compiler to fetch tx indices from memory. */
6501         barrier();
6502         return tnapi->tx_pending -
6503                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6504 }
6505
6506 /* Tigon3 never reports partial packet sends.  So we do not
6507  * need special logic to handle SKBs that have not had all
6508  * of their frags sent yet, like SunGEM does.
6509  */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the tx vectors start at napi[1]; shift down to get the
	 * matching netdev tx queue index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reap completed descriptors from our software consumer index up
	 * to the hardware consumer reported in the status block.
	 */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means the hardware and
		 * software views of the ring disagree — likely reordered
		 * mailbox writes; enter the recovery path.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* If a hardware tx timestamp was requested for this
		 * descriptor, read it back and hand it to the stack.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra ring slots that were consumed when this
		 * buffer's mapping was split across multiple descriptors
		 * (marked via ri->fragmented).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each paged fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Walking into an occupied slot or past hw_idx
			 * means the completion index is bogus.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_consume_skb_any(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions to byte queue limits. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop in the transmit path.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6609
6610 static void tg3_frag_free(bool is_frag, void *data)
6611 {
6612         if (is_frag)
6613                 skb_free_frag(data);
6614         else
6615                 kfree(data);
6616 }
6617
6618 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6619 {
6620         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6621                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6622
6623         if (!ri->data)
6624                 return;
6625
6626         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6627                          map_sz, PCI_DMA_FROMDEVICE);
6628         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6629         ri->data = NULL;
6630 }
6631
6632
6633 /* Returns size of skb allocated or < 0 on error.
6634  *
6635  * We only need to fill in the address because the other members
6636  * of the RX descriptor are invariant, see tg3_init_rings.
6637  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6639  * posting buffers we only dirty the first cache line of the RX
6640  * descriptor (containing the address).  Whereas for the RX status
6641  * buffers the cpu only reads the last cacheline of the RX descriptor
6642  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6643  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick the descriptor, bookkeeping slot and buffer size for the
	 * ring (std or jumbo) named by the opaque key.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* *frag_size is later consumed by build_skb(): the allocation size
	 * for a page-fragment buffer, or 0 for a kmalloc'ed one.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address in the
	 * descriptor for the chip.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6708
6709 /* We only need to move over in the address because the other
6710  * members of the RX descriptor are invariant.  See notes above
6711  * tg3_alloc_rx_data for full details.
6712  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from napi[0]'s producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move buffer pointer, unmap cookie and DMA address from the
	 * source slot to the destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6758
6759 /* The RX ring scheme is composed of multiple rings which post fresh
6760  * buffers to the chip, and one special ring the chip uses to report
6761  * status back to the host.
6762  *
6763  * The special ring reports the status of received packets to the
6764  * host.  The chip does not write into the original descriptor the
6765  * RX buffer was obtained from.  The chip simply takes the original
6766  * descriptor as provided by the host, updates the status and length
6767  * field, then writes this into the next status ring entry.
6768  *
6769  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6771  * it is first placed into the on-chip ram.  When the packet's length
6772  * is known, it walks down the TG3_BDINFO entries to select the ring.
6773  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6774  * which is within the range of the new packet's length is chosen.
6775  *
6776  * The "separate ring for rx status" scheme may sound queer, but it makes
6777  * sense from a cache coherency perspective.  If only the host writes
6778  * to the buffer post rings, and only the chip writes to the rx status
6779  * rings, then cache lines never move beyond shared-modified state.
6780  * If both the host and chip were to write into the same ring, cache line
6781  * eviction could occur since both entities want it in an exclusive state.
6782  */
/* Process received packets on this vector's return ring, up to @budget
 * packets.  Returns the number of packets handed to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie identifies the producer ring (std or
		 * jumbo) and the slot this buffer was posted from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if (desc->err_vlan & RXD_ERR_MASK) {
		drop_it:
			/* Return the buffer to the producer ring. */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* The hardware-reported length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware rx timestamp for PTP v1/v2 frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large packet: post a fresh buffer to the ring and
			 * hand the old one to the stack without copying.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			/* Small packet: copy into a new skb and recycle the
			 * DMA buffer back to the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when RXCSUM is enabled
		 * and the chip's TCP/UDP checksum result is all ones.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop frames longer than the MTU unless they carry a VLAN
		 * ethertype, which accounts for the extra header length.
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) {
			dev_kfree_skb_any(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the std producer index mid-loop so
		 * the chip is not starved of buffers during a long burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the hardware producer rings; ask
		 * it to transfer our refilled buffers over.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6988
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while preserving the
			 * rest of the status word, and mark it updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib handles the link itself; just ack
				 * the MAC status bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
7012
/* Transfer refilled rx buffers from a source producer ring set (@spr,
 * belonging to an RSS vector) into the destination set (@dpr, the one
 * the hardware actually consumes).  Returns 0 on success or -ENOSPC if
 * a destination slot was still occupied, in which case only a partial
 * transfer was done.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First pass: the standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only up to the ring wrap point in this iteration. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shorten the copy if a destination slot is still in use. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Second pass: the jumbo ring, same algorithm. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
7138
/* One unit of NAPI work for this vector: reap tx completions, process
 * rx within the remaining budget, and (for the RSS master vector,
 * napi[1]) gather refilled buffers from all vectors into napi[0]'s
 * producer rings.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vector has no rx return ring; nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Pull refilled buffers from every rx vector into the
		 * hardware-visible producer rings on napi[0].
		 */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make BD updates visible before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (destination slot busy); kick the
		 * coalescing engine so we get polled again soon to retry.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7189
7190 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7191 {
7192         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7193                 schedule_work(&tp->reset_task);
7194 }
7195
7196 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7197 {
7198         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199                 cancel_work_sync(&tp->reset_task);
7200         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7201 }
7202
/* NAPI poll callback for MSI-X vectors (tagged status blocks).
 * Loops doing work until the budget is exhausted or no work remains,
 * then re-enables the vector's interrupt via its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7262
/* Inspect the chip's error status registers after SD_STATUS_ERROR is
 * seen.  If a real error is present, dump chip state and schedule a
 * reset; the ERROR_PROCESSED flag ensures we only react once.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	/* Already handled a previous error; wait for the reset. */
	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	/* Anything other than the MBUF low-watermark bit counts as real. */
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	/* Any bit set in either DMA engine's status register is an error. */
	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
7296
/* NAPI poll callback for the default vector (INTx/MSI).  Also handles
 * link events and chip error processing, then re-enables interrupts
 * when all work is done.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		/* React to chip errors flagged in the status block. */
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Non-tagged mode: acknowledge the update bit so
			 * tg3_has_work() can detect new events.
			 */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7344
7345 static void tg3_napi_disable(struct tg3 *tp)
7346 {
7347         int i;
7348
7349         for (i = tp->irq_cnt - 1; i >= 0; i--)
7350                 napi_disable(&tp->napi[i].napi);
7351 }
7352
7353 static void tg3_napi_enable(struct tg3 *tp)
7354 {
7355         int i;
7356
7357         for (i = 0; i < tp->irq_cnt; i++)
7358                 napi_enable(&tp->napi[i].napi);
7359 }
7360
7361 static void tg3_napi_init(struct tg3 *tp)
7362 {
7363         int i;
7364
7365         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7366         for (i = 1; i < tp->irq_cnt; i++)
7367                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7368 }
7369
7370 static void tg3_napi_fini(struct tg3 *tp)
7371 {
7372         int i;
7373
7374         for (i = 0; i < tp->irq_cnt; i++)
7375                 netif_napi_del(&tp->napi[i].napi);
7376 }
7377
/* Stop all network-facing activity: silence NAPI, drop the carrier and
 * freeze the TX queues.  The trans_start touch prevents the watchdog
 * from declaring a TX timeout while we are deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7385
/* tp->lock must be held.
 * Restart network-facing activity after tg3_netif_stop(): wake the TX
 * queues, restore carrier state, re-enable NAPI and chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the next poll processes it,
	 * then unmask chip interrupts.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7404
/* Wait for all in-flight interrupt handlers to finish.  Sets irq_sync
 * so handlers bail out early (see tg3_irq_sync() checks in the ISRs),
 * then synchronizes each vector.  Must be called with tp->lock held;
 * the lock is dropped around synchronize_irq() because the handlers
 * may be spinning on it.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible to the IRQ handlers before waiting. */
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}
7423
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7435
/* Counterpart to tg3_full_lock(): release tp->lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7440
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7458
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX descriptor. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Skip scheduling while tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7484
/* Legacy INTx interrupt handler for chips using non-tagged status
 * blocks.  May share the IRQ line with other devices, so it must
 * determine whether the interrupt is actually ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Bail out (but report handled) during tg3_irq_quiesce(). */
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7533
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * An unchanged status_tag means no new events since the last IRQ, so
 * the interrupt likely belongs to another device on the shared line.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	/* Bail out (but report handled) during tg3_irq_quiesce(). */
	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7585
/* ISR for interrupt test.  Claims the interrupt if the status block
 * was updated or the chip reports INTA asserted, then disables further
 * interrupts; otherwise reports it unhandled.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7600
7601 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: drive RX/TX processing without interrupts by
 * invoking the INTx handler for every vector.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Don't race with tg3_irq_quiesce()/chip reset. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
7613 #endif
7614
/* Netdev watchdog callback: a TX queue stalled.  Optionally log and
 * dump chip state, then schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7626
7627 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7628 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7629 {
7630         u32 base = (u32) mapping & 0xffffffff;
7631
7632         return base + len + 8 < base;
7633 }
7634
7635 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7636  * of any 4GB boundaries: 4G, 8G, etc
7637  */
7638 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7639                                            u32 len, u32 mss)
7640 {
7641         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7642                 u32 base = (u32) mapping & 0xffffffff;
7643
7644                 return ((base + len + (mss & 0x3fff)) < base);
7645         }
7646         return 0;
7647 }
7648
7649 /* Test for DMA addresses > 40-bit */
7650 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7651                                           int len)
7652 {
7653 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7654         if (tg3_flag(tp, 40BIT_DMA_BUG))
7655                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7656         return 0;
7657 #else
7658         return 0;
7659 #endif
7660 }
7661
7662 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7663                                  dma_addr_t mapping, u32 len, u32 flags,
7664                                  u32 mss, u32 vlan)
7665 {
7666         txbd->addr_hi = ((u64) mapping >> 32);
7667         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7668         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7669         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7670 }
7671
/* Write one or more TX BDs for a DMA buffer of @len bytes at @map,
 * splitting it into chunks when tp->dma_limit requires.  *entry and
 * *budget are advanced/consumed as descriptors are written.  Returns
 * true when the buffer trips a hardware DMA bug (or the budget runs
 * out mid-split) and the caller must fall back to the bounce-buffer
 * workaround; note descriptors may already have been written in that
 * case — the caller is expected to unmap them.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Chips with the short-DMA bug cannot DMA buffers of <= 8 bytes. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the final chunk keeps the END flag. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final chunk carries the original flags
				 * (including END if it was set).
				 */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors mid-split; un-mark the
				 * previous entry so unmap stops there.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No splitting needed; a single BD covers the buffer. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7734
/* Unmap the DMA mappings of a TX skb starting at ring index @entry.
 * @last is the index of the last page fragment to unmap, or -1 when
 * only the linear head was mapped.  Entries marked 'fragmented' were
 * split by tg3_tx_frag_set() and are skipped over without their own
 * unmap.  Clears txb->skb but does not free the skb.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip the extra BDs the head was split into. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip the extra BDs this fragment was split into. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7772
/* Workaround 4GB and 40-bit hardware DMA bugs.
 * Bounce the skb into a freshly-allocated linear copy (with 4-byte
 * alignment headroom on 5701), remap it, and write its descriptors.
 * On success *pskb points at the new skb; the original skb is always
 * consumed.  Returns 0 on success, -1 on allocation/mapping failure.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned; expand headroom so
		 * the copy can be placed on such a boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* Even the bounced copy can still trip a DMA bug;
			 * if so, unwind the descriptors we just wrote.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}
7827
7828 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7829 {
7830         /* Check if we will never have enough descriptors,
7831          * as gso_segs can be more than current ring size
7832          */
7833         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7834 }
7835
7836 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7837
/* Use GSO to workaround all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set().  Segments the skb in software and
 * transmits each resulting packet through tg3_start_xmit().  The
 * original skb is always consumed.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY if the ring lacks room for the estimated segments.
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst-case estimate: three descriptors per segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	/* Segment in software, disabling TSO features for the split. */
	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	/* Transmit each segment individually. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
7880
7881 /* hard_start_xmit for all devices */
7882 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7883 {
7884         struct tg3 *tp = netdev_priv(dev);
7885         u32 len, entry, base_flags, mss, vlan = 0;
7886         u32 budget;
7887         int i = -1, would_hit_hwbug;
7888         dma_addr_t mapping;
7889         struct tg3_napi *tnapi;
7890         struct netdev_queue *txq;
7891         unsigned int last;
7892         struct iphdr *iph = NULL;
7893         struct tcphdr *tcph = NULL;
7894         __sum16 tcp_csum = 0, ip_csum = 0;
7895         __be16 ip_tot_len = 0;
7896
7897         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7898         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7899         if (tg3_flag(tp, ENABLE_TSS))
7900                 tnapi++;
7901
7902         budget = tg3_tx_avail(tnapi);
7903
7904         /* We are running in BH disabled context with netif_tx_lock
7905          * and TX reclaim runs via tp->napi.poll inside of a software
7906          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7907          * no IRQ context deadlocks to worry about either.  Rejoice!
7908          */
7909         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7910                 if (!netif_tx_queue_stopped(txq)) {
7911                         netif_tx_stop_queue(txq);
7912
7913                         /* This is a hard error, log it. */
7914                         netdev_err(dev,
7915                                    "BUG! Tx Ring full when queue awake!\n");
7916                 }
7917                 return NETDEV_TX_BUSY;
7918         }
7919
7920         entry = tnapi->tx_prod;
7921         base_flags = 0;
7922
7923         mss = skb_shinfo(skb)->gso_size;
7924         if (mss) {
7925                 u32 tcp_opt_len, hdr_len;
7926
7927                 if (skb_cow_head(skb, 0))
7928                         goto drop;
7929
7930                 iph = ip_hdr(skb);
7931                 tcp_opt_len = tcp_optlen(skb);
7932
7933                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7934
7935                 /* HW/FW can not correctly segment packets that have been
7936                  * vlan encapsulated.
7937                  */
7938                 if (skb->protocol == htons(ETH_P_8021Q) ||
7939                     skb->protocol == htons(ETH_P_8021AD)) {
7940                         if (tg3_tso_bug_gso_check(tnapi, skb))
7941                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7942                         goto drop;
7943                 }
7944
7945                 if (!skb_is_gso_v6(skb)) {
7946                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7947                             tg3_flag(tp, TSO_BUG)) {
7948                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7949                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7950                                 goto drop;
7951                         }
7952                         ip_csum = iph->check;
7953                         ip_tot_len = iph->tot_len;
7954                         iph->check = 0;
7955                         iph->tot_len = htons(mss + hdr_len);
7956                 }
7957
7958                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7959                                TXD_FLAG_CPU_POST_DMA);
7960
7961                 tcph = tcp_hdr(skb);
7962                 tcp_csum = tcph->check;
7963
7964                 if (tg3_flag(tp, HW_TSO_1) ||
7965                     tg3_flag(tp, HW_TSO_2) ||
7966                     tg3_flag(tp, HW_TSO_3)) {
7967                         tcph->check = 0;
7968                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7969                 } else {
7970                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7971                                                          0, IPPROTO_TCP, 0);
7972                 }
7973
7974                 if (tg3_flag(tp, HW_TSO_3)) {
7975                         mss |= (hdr_len & 0xc) << 12;
7976                         if (hdr_len & 0x10)
7977                                 base_flags |= 0x00000010;
7978                         base_flags |= (hdr_len & 0x3e0) << 5;
7979                 } else if (tg3_flag(tp, HW_TSO_2))
7980                         mss |= hdr_len << 9;
7981                 else if (tg3_flag(tp, HW_TSO_1) ||
7982                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7983                         if (tcp_opt_len || iph->ihl > 5) {
7984                                 int tsflags;
7985
7986                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7987                                 mss |= (tsflags << 11);
7988                         }
7989                 } else {
7990                         if (tcp_opt_len || iph->ihl > 5) {
7991                                 int tsflags;
7992
7993                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7994                                 base_flags |= tsflags << 12;
7995                         }
7996                 }
7997         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7998                 /* HW/FW can not correctly checksum packets that have been
7999                  * vlan encapsulated.
8000                  */
8001                 if (skb->protocol == htons(ETH_P_8021Q) ||
8002                     skb->protocol == htons(ETH_P_8021AD)) {
8003                         if (skb_checksum_help(skb))
8004                                 goto drop;
8005                 } else  {
8006                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8007                 }
8008         }
8009
8010         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8011             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8012                 base_flags |= TXD_FLAG_JMB_PKT;
8013
8014         if (skb_vlan_tag_present(skb)) {
8015                 base_flags |= TXD_FLAG_VLAN;
8016                 vlan = skb_vlan_tag_get(skb);
8017         }
8018
8019         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8020             tg3_flag(tp, TX_TSTAMP_EN)) {
8021                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8022                 base_flags |= TXD_FLAG_HWTSTAMP;
8023         }
8024
8025         len = skb_headlen(skb);
8026
8027         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8028         if (pci_dma_mapping_error(tp->pdev, mapping))
8029                 goto drop;
8030
8031
8032         tnapi->tx_buffers[entry].skb = skb;
8033         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8034
8035         would_hit_hwbug = 0;
8036
8037         if (tg3_flag(tp, 5701_DMA_BUG))
8038                 would_hit_hwbug = 1;
8039
8040         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8041                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8042                             mss, vlan)) {
8043                 would_hit_hwbug = 1;
8044         } else if (skb_shinfo(skb)->nr_frags > 0) {
8045                 u32 tmp_mss = mss;
8046
8047                 if (!tg3_flag(tp, HW_TSO_1) &&
8048                     !tg3_flag(tp, HW_TSO_2) &&
8049                     !tg3_flag(tp, HW_TSO_3))
8050                         tmp_mss = 0;
8051
8052                 /* Now loop through additional data
8053                  * fragments, and queue them.
8054                  */
8055                 last = skb_shinfo(skb)->nr_frags - 1;
8056                 for (i = 0; i <= last; i++) {
8057                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8058
8059                         len = skb_frag_size(frag);
8060                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8061                                                    len, DMA_TO_DEVICE);
8062
8063                         tnapi->tx_buffers[entry].skb = NULL;
8064                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8065                                            mapping);
8066                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8067                                 goto dma_error;
8068
8069                         if (!budget ||
8070                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8071                                             len, base_flags |
8072                                             ((i == last) ? TXD_FLAG_END : 0),
8073                                             tmp_mss, vlan)) {
8074                                 would_hit_hwbug = 1;
8075                                 break;
8076                         }
8077                 }
8078         }
8079
8080         if (would_hit_hwbug) {
8081                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8082
8083                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8084                         /* If it's a TSO packet, do GSO instead of
8085                          * allocating and copying to a large linear SKB
8086                          */
8087                         if (ip_tot_len) {
8088                                 iph->check = ip_csum;
8089                                 iph->tot_len = ip_tot_len;
8090                         }
8091                         tcph->check = tcp_csum;
8092                         return tg3_tso_bug(tp, tnapi, txq, skb);
8093                 }
8094
8095                 /* If the workaround fails due to memory/mapping
8096                  * failure, silently drop this packet.
8097                  */
8098                 entry = tnapi->tx_prod;
8099                 budget = tg3_tx_avail(tnapi);
8100                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8101                                                 base_flags, mss, vlan))
8102                         goto drop_nofree;
8103         }
8104
8105         skb_tx_timestamp(skb);
8106         netdev_tx_sent_queue(txq, skb->len);
8107
8108         /* Sync BD data before updating mailbox */
8109         wmb();
8110
8111         tnapi->tx_prod = entry;
8112         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8113                 netif_tx_stop_queue(txq);
8114
8115                 /* netif_tx_stop_queue() must be done before checking
8116                  * checking tx index in tg3_tx_avail() below, because in
8117                  * tg3_tx(), we update tx index before checking for
8118                  * netif_tx_queue_stopped().
8119                  */
8120                 smp_mb();
8121                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8122                         netif_tx_wake_queue(txq);
8123         }
8124
8125         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8126                 /* Packets are ready, update Tx producer idx on card. */
8127                 tw32_tx_mbox(tnapi->prodmbox, entry);
8128                 mmiowb();
8129         }
8130
8131         return NETDEV_TX_OK;
8132
8133 dma_error:
8134         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8135         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8136 drop:
8137         dev_kfree_skb_any(skb);
8138 drop_nofree:
8139         tp->tx_dropped++;
8140         return NETDEV_TX_OK;
8141 }
8142
/* Enable or disable internal MAC loopback mode.
 *
 * @tp:     device private state (callers in this file hold tp->lock)
 * @enable: true to loop TX back to RX inside the MAC, false to restore
 *          normal port operation
 *
 * Rewrites tp->mac_mode and flushes the result to the MAC_MODE register.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		/* NOTE(review): pre-5705 parts get the link polarity bit
		 * forced while in loopback — presumably a chip quirk;
		 * confirm against hardware errata.
		 */
		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		/* Select the port mode matching the PHY's capability. */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);	/* let the MAC settle after the mode change */
}
8170
/* Configure the PHY for loopback testing at the requested speed.
 *
 * @tp:      device private state
 * @speed:   SPEED_10, SPEED_100 or SPEED_1000; any other value falls
 *           through to the gigabit case.  FET-class PHYs are capped at
 *           100Mb regardless.
 * @extlpbk: true = external loopback (requires loopback plug/cable),
 *           false = internal PHY loopback via BMCR_LOOPBACK
 *
 * Returns 0 on success, or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* Disable auto power-down and automatic MDI crossover before
	 * forcing the link.
	 */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build a BMCR value forcing a full-duplex link at @speed. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			/* FET PHYs cannot do gigabit here; drop to 100Mb. */
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role for the external link test. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the forced PHY speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* On 5700, the required link polarity depends on the
		 * exact PHY part fitted.
		 */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8263
8264 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8265 {
8266         struct tg3 *tp = netdev_priv(dev);
8267
8268         if (features & NETIF_F_LOOPBACK) {
8269                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8270                         return;
8271
8272                 spin_lock_bh(&tp->lock);
8273                 tg3_mac_loopback(tp, true);
8274                 netif_carrier_on(tp->dev);
8275                 spin_unlock_bh(&tp->lock);
8276                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8277         } else {
8278                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8279                         return;
8280
8281                 spin_lock_bh(&tp->lock);
8282                 tg3_mac_loopback(tp, false);
8283                 /* Force link status check */
8284                 tg3_setup_phy(tp, true);
8285                 spin_unlock_bh(&tp->lock);
8286                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8287         }
8288 }
8289
8290 static netdev_features_t tg3_fix_features(struct net_device *dev,
8291         netdev_features_t features)
8292 {
8293         struct tg3 *tp = netdev_priv(dev);
8294
8295         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8296                 features &= ~NETIF_F_ALL_TSO;
8297
8298         return features;
8299 }
8300
8301 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8302 {
8303         netdev_features_t changed = dev->features ^ features;
8304
8305         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8306                 tg3_set_loopback(dev, features);
8307
8308         return 0;
8309 }
8310
/* Free the rx data buffers attached to a producer ring set.  The
 * descriptor rings themselves are released by tg3_rx_prodring_fini().
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	/* A non-default ring set (anything other than napi[0]'s) only
	 * owns the buffers in flight between its consumer and producer
	 * indices, so free just that window and return.
	 */
	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Default ring set: free every slot unconditionally. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips get no separate jumbo buffer ring (see
	 * tg3_rx_prodring_init), so skip it for them.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
8344
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one rx buffer could be
 * allocated; on that failure path everything is unwound via
 * tg3_rx_prodring_free().
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Non-default ring sets only need their shadow buffer arrays
	 * cleared; the real descriptor rings below belong to napi[0].
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips with jumbo MTU use larger standard-ring
	 * buffers instead of a separate jumbo ring.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		/* A partial allocation is tolerated by shrinking
		 * rx_pending; zero buffers is a hard failure.
		 */
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as above, for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8453
8454 static void tg3_rx_prodring_fini(struct tg3 *tp,
8455                                  struct tg3_rx_prodring_set *tpr)
8456 {
8457         kfree(tpr->rx_std_buffers);
8458         tpr->rx_std_buffers = NULL;
8459         kfree(tpr->rx_jmb_buffers);
8460         tpr->rx_jmb_buffers = NULL;
8461         if (tpr->rx_std) {
8462                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8463                                   tpr->rx_std, tpr->rx_std_mapping);
8464                 tpr->rx_std = NULL;
8465         }
8466         if (tpr->rx_jmb) {
8467                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8468                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8469                 tpr->rx_jmb = NULL;
8470         }
8471 }
8472
8473 static int tg3_rx_prodring_init(struct tg3 *tp,
8474                                 struct tg3_rx_prodring_set *tpr)
8475 {
8476         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8477                                       GFP_KERNEL);
8478         if (!tpr->rx_std_buffers)
8479                 return -ENOMEM;
8480
8481         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8482                                          TG3_RX_STD_RING_BYTES(tp),
8483                                          &tpr->rx_std_mapping,
8484                                          GFP_KERNEL);
8485         if (!tpr->rx_std)
8486                 goto err_out;
8487
8488         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8489                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8490                                               GFP_KERNEL);
8491                 if (!tpr->rx_jmb_buffers)
8492                         goto err_out;
8493
8494                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8495                                                  TG3_RX_JMB_RING_BYTES(tp),
8496                                                  &tpr->rx_jmb_mapping,
8497                                                  GFP_KERNEL);
8498                 if (!tpr->rx_jmb)
8499                         goto err_out;
8500         }
8501
8502         return 0;
8503
8504 err_out:
8505         tg3_rx_prodring_fini(tp, tpr);
8506         return -ENOMEM;
8507 }
8508
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without tx resources have nothing more to do. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the head plus all of this skb's fragment
			 * descriptors before releasing the skb.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		/* Reset BQL accounting for the emptied tx queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
8542
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM, in which case all rings are freed
 * again before returning.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		/* The two explicit stores are subsumed by the memset()
		 * that follows; retained as-is from the original code.
		 */
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		/* Repopulate this vector's producer ring set; unwind
		 * everything done so far if that fails.
		 */
		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
8584
8585 static void tg3_mem_tx_release(struct tg3 *tp)
8586 {
8587         int i;
8588
8589         for (i = 0; i < tp->irq_max; i++) {
8590                 struct tg3_napi *tnapi = &tp->napi[i];
8591
8592                 if (tnapi->tx_ring) {
8593                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8594                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8595                         tnapi->tx_ring = NULL;
8596                 }
8597
8598                 kfree(tnapi->tx_buffers);
8599                 tnapi->tx_buffers = NULL;
8600         }
8601 }
8602
8603 static int tg3_mem_tx_acquire(struct tg3 *tp)
8604 {
8605         int i;
8606         struct tg3_napi *tnapi = &tp->napi[0];
8607
8608         /* If multivector TSS is enabled, vector 0 does not handle
8609          * tx interrupts.  Don't allocate any resources for it.
8610          */
8611         if (tg3_flag(tp, ENABLE_TSS))
8612                 tnapi++;
8613
8614         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8615                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8616                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8617                 if (!tnapi->tx_buffers)
8618                         goto err_out;
8619
8620                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8621                                                     TG3_TX_RING_BYTES,
8622                                                     &tnapi->tx_desc_mapping,
8623                                                     GFP_KERNEL);
8624                 if (!tnapi->tx_ring)
8625                         goto err_out;
8626         }
8627
8628         return 0;
8629
8630 err_out:
8631         tg3_mem_tx_release(tp);
8632         return -ENOMEM;
8633 }
8634
8635 static void tg3_mem_rx_release(struct tg3 *tp)
8636 {
8637         int i;
8638
8639         for (i = 0; i < tp->irq_max; i++) {
8640                 struct tg3_napi *tnapi = &tp->napi[i];
8641
8642                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8643
8644                 if (!tnapi->rx_rcb)
8645                         continue;
8646
8647                 dma_free_coherent(&tp->pdev->dev,
8648                                   TG3_RX_RCB_RING_BYTES(tp),
8649                                   tnapi->rx_rcb,
8650                                   tnapi->rx_rcb_mapping);
8651                 tnapi->rx_rcb = NULL;
8652         }
8653 }
8654
/* Allocate rx memory: a producer ring set per rx vector (plus a dummy
 * set on vector 0 when RSS is enabled) and a DMA-coherent rx return
 * ring for each vector that actually services rx.
 *
 * Returns 0 on success or -ENOMEM; partial allocations are released
 * via tg3_mem_rx_release() before returning the error.
 */
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
8694
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-coherent allocation made by
 * tg3_alloc_consistent(): per-vector status blocks, rx/tx ring memory
 * and the hardware statistics block.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	/* Per-vector status blocks first. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 *     1. under rtnl_lock
	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
8727
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware statistics block, a status block per
 * interrupt vector, and all tx/rx ring memory.  Returns 0 on success
 * or -ENOMEM; any partial allocation is undone via
 * tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			/* Note: for i == 0 (and i > 4) this stays NULL,
			 * matching the RSS case where vector 0 has no rx
			 * return ring (see tg3_mem_rx_acquire()).
			 */
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
8793
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        register offset of the block's mode register
 * @enable_bit: enable bit to clear and then poll for
 * @silent:     suppress the timeout error message (and the -ENODEV
 *              return on timeout)
 *
 * Returns 0 when the bit clears (or when the block cannot be disabled
 * on 5705-plus hardware, or on a silent timeout); -ENODEV if the PCI
 * channel has gone offline or a non-silent poll times out.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
8849
/* Quiesce the chip: stop the receive path, the send path, host
 * coalescing and the buffer/memory arbitration blocks, then zero every
 * vector's status block.  Errors from the individual blocks are OR-ed
 * together; @silent is forwarded to tg3_stop_block() so timeouts can be
 * suppressed.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		/* Device unreachable: skip all register accesses but still
		 * drop the cached enable bits and clear the status blocks.
		 */
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the send-side blocks and the DMA engines' send/read side. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll until the MAC transmitter reports itself stopped. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	/* Zero every vector's host status block. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8921
/* Save PCI command register before chip reset.  The GRC core clock reset
 * issued by tg3_chip_reset() can clear the memory enable bit in
 * PCI_COMMAND, so the saved value is written back afterwards by
 * tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8927
/* Restore PCI state after chip reset.  Undoes the register loss caused by
 * the core clock reset: re-enables indirect accesses, restores the saved
 * PCI_COMMAND word, the cache line size / latency timer (non-PCIe),
 * clears PCI-X relaxed ordering, and re-enables MSI on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI generation inside the chip. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8988
8989 static void tg3_override_clk(struct tg3 *tp)
8990 {
8991         u32 val;
8992
8993         switch (tg3_asic_rev(tp)) {
8994         case ASIC_REV_5717:
8995                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8996                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8997                      TG3_CPMU_MAC_ORIDE_ENABLE);
8998                 break;
8999
9000         case ASIC_REV_5719:
9001         case ASIC_REV_5720:
9002                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9003                 break;
9004
9005         default:
9006                 return;
9007         }
9008 }
9009
9010 static void tg3_restore_clk(struct tg3 *tp)
9011 {
9012         u32 val;
9013
9014         switch (tg3_asic_rev(tp)) {
9015         case ASIC_REV_5717:
9016                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9017                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9018                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9019                 break;
9020
9021         case ASIC_REV_5719:
9022         case ASIC_REV_5720:
9023                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9024                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9025                 break;
9026
9027         default:
9028                 return;
9029         }
9030 }
9031
/* Issue a GRC core clock reset and bring the chip back to a usable state.
 * Temporarily drops and re-acquires tp->lock so in-flight irq handlers
 * can drain while CHIP_RESETTING is set.  Returns 0 on success or a
 * negative errno.  tp->lock is held on entry and exit.
 */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Drop the lock and synchronize with any running irq handlers
	 * before touching the reset register.
	 */
	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		/* 57780: clear the L1 PLL power-down enable bit and set the
		 * corresponding disable bit before the reset.
		 */
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			/* Chip-specific workaround bit, written both on its
			 * own and as part of the reset value below.
			 */
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter, preserving its existing mode bits
	 * on 5780-class chips.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for the bootcode to complete before touching the rest of
	 * the chip.
	 */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode based on the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9309
9310 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9311 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9312 static void __tg3_set_rx_mode(struct net_device *);
9313
/* Fully stop the device: halt firmware interaction, quiesce the hardware,
 * reset the chip and re-program the MAC address.  Hardware statistics are
 * snapshotted into *_prev before the status memory is cleared so the next
 * sample starts from fresh data.  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	/* Post the pre-reset signature for this reset kind. */
	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
9342
9343 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9344 {
9345         struct tg3 *tp = netdev_priv(dev);
9346         struct sockaddr *addr = p;
9347         int err = 0;
9348         bool skip_mac_1 = false;
9349
9350         if (!is_valid_ether_addr(addr->sa_data))
9351                 return -EADDRNOTAVAIL;
9352
9353         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9354
9355         if (!netif_running(dev))
9356                 return 0;
9357
9358         if (tg3_flag(tp, ENABLE_ASF)) {
9359                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9360
9361                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9362                 addr0_low = tr32(MAC_ADDR_0_LOW);
9363                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9364                 addr1_low = tr32(MAC_ADDR_1_LOW);
9365
9366                 /* Skip MAC addr 1 if ASF is using it. */
9367                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9368                     !(addr1_high == 0 && addr1_low == 0))
9369                         skip_mac_1 = true;
9370         }
9371         spin_lock_bh(&tp->lock);
9372         __tg3_set_mac_addr(tp, skip_mac_1);
9373         __tg3_set_rx_mode(dev);
9374         spin_unlock_bh(&tp->lock);
9375
9376         return err;
9377 }
9378
9379 /* tp->lock is held. */
9380 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9381                            dma_addr_t mapping, u32 maxlen_flags,
9382                            u32 nic_addr)
9383 {
9384         tg3_write_mem(tp,
9385                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9386                       ((u64) mapping >> 32));
9387         tg3_write_mem(tp,
9388                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9389                       ((u64) mapping & 0xffffffff));
9390         tg3_write_mem(tp,
9391                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9392                        maxlen_flags);
9393
9394         if (!tg3_flag(tp, 5705_PLUS))
9395                 tg3_write_mem(tp,
9396                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9397                               nic_addr);
9398 }
9399
9400
9401 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9402 {
9403         int i = 0;
9404
9405         if (!tg3_flag(tp, ENABLE_TSS)) {
9406                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9407                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9408                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9409         } else {
9410                 tw32(HOSTCC_TXCOL_TICKS, 0);
9411                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9412                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9413
9414                 for (; i < tp->txq_cnt; i++) {
9415                         u32 reg;
9416
9417                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9418                         tw32(reg, ec->tx_coalesce_usecs);
9419                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9420                         tw32(reg, ec->tx_max_coalesced_frames);
9421                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9422                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9423                 }
9424         }
9425
9426         for (; i < tp->irq_max - 1; i++) {
9427                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9428                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9429                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9430         }
9431 }
9432
9433 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9434 {
9435         int i = 0;
9436         u32 limit = tp->rxq_cnt;
9437
9438         if (!tg3_flag(tp, ENABLE_RSS)) {
9439                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9440                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9441                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9442                 limit--;
9443         } else {
9444                 tw32(HOSTCC_RXCOL_TICKS, 0);
9445                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9446                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9447         }
9448
9449         for (; i < limit; i++) {
9450                 u32 reg;
9451
9452                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9453                 tw32(reg, ec->rx_coalesce_usecs);
9454                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9455                 tw32(reg, ec->rx_max_coalesced_frames);
9456                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9457                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9458         }
9459
9460         for (; i < tp->irq_max - 1; i++) {
9461                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9462                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9463                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9464         }
9465 }
9466
9467 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9468 {
9469         tg3_coal_tx_init(tp, ec);
9470         tg3_coal_rx_init(tp, ec);
9471
9472         if (!tg3_flag(tp, 5705_PLUS)) {
9473                 u32 val = ec->stats_block_coalesce_usecs;
9474
9475                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9476                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9477
9478                 if (!tp->link_up)
9479                         val = 0;
9480
9481                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9482         }
9483 }
9484
9485 /* tp->lock is held. */
9486 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9487 {
9488         u32 txrcb, limit;
9489
9490         /* Disable all transmit rings but the first. */
9491         if (!tg3_flag(tp, 5705_PLUS))
9492                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9493         else if (tg3_flag(tp, 5717_PLUS))
9494                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9495         else if (tg3_flag(tp, 57765_CLASS) ||
9496                  tg3_asic_rev(tp) == ASIC_REV_5762)
9497                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9498         else
9499                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9500
9501         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9502              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9503                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9504                               BDINFO_FLAGS_DISABLED);
9505 }
9506
9507 /* tp->lock is held. */
9508 static void tg3_tx_rcbs_init(struct tg3 *tp)
9509 {
9510         int i = 0;
9511         u32 txrcb = NIC_SRAM_SEND_RCB;
9512
9513         if (tg3_flag(tp, ENABLE_TSS))
9514                 i++;
9515
9516         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9517                 struct tg3_napi *tnapi = &tp->napi[i];
9518
9519                 if (!tnapi->tx_ring)
9520                         continue;
9521
9522                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9523                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9524                                NIC_SRAM_TX_BUFFER_DESC);
9525         }
9526 }
9527
9528 /* tp->lock is held. */
9529 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9530 {
9531         u32 rxrcb, limit;
9532
9533         /* Disable all receive return rings but the first. */
9534         if (tg3_flag(tp, 5717_PLUS))
9535                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9536         else if (!tg3_flag(tp, 5705_PLUS))
9537                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9538         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9539                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9540                  tg3_flag(tp, 57765_CLASS))
9541                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9542         else
9543                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9544
9545         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9546              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9547                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9548                               BDINFO_FLAGS_DISABLED);
9549 }
9550
9551 /* tp->lock is held. */
9552 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9553 {
9554         int i = 0;
9555         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9556
9557         if (tg3_flag(tp, ENABLE_RSS))
9558                 i++;
9559
9560         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9561                 struct tg3_napi *tnapi = &tp->napi[i];
9562
9563                 if (!tnapi->rx_rcb)
9564                         continue;
9565
9566                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9567                                (tp->rx_ret_ring_mask + 1) <<
9568                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9569         }
9570 }
9571
/* Reset all rings, mailboxes and status blocks to a clean state: disable
 * the extra send/receive-return RCBs, zero the mailbox registers and
 * per-vector bookkeeping, re-program the status block DMA addresses, then
 * re-initialize the RCBs that are in use.  tp->lock is held.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS only vector 0 has a tx producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Per-vector status block address registers start at
	 * HOSTCC_STATBLCK_RING1 and are 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9642
/* Program the RX buffer-descriptor replenish thresholds.  The
 * watermark is bounded by the per-chip-family on-die BD cache size
 * and by how many BDs the host has configured for the ring.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard-ring BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	/* At most 1/8 of the configured ring, but never zero. */
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo-ring thresholds apply only to jumbo-capable,
	 * non-5780-class devices.
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9681
/* Bit-serial, reflected CRC-32 (polynomial 0xedb88320) over @buf:
 * init 0xffffffff, final inversion.  Used to derive the multicast
 * hash filter bit for an Ethernet address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32)0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
9705
9706 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9707 {
9708         /* accept or reject all multicast frames */
9709         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9710         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9711         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9712         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9713 }
9714
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	/* Start from the current mode with promiscuous and VLAN-tag
	 * retention cleared; both are re-added below as needed.
	 */
	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s): hash each address
		 * into one of 128 filter bits spread across the four
		 * 32-bit hash registers.
		 */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;	/* low 7 bits pick the filter bit */
			regidx = (bit & 0x60) >> 5;	/* which of the 4 registers */
			bit &= 0x1f;		/* bit position within it */
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Fall back to promiscuous mode if there are more unicast
	 * addresses than hardware filter slots.
	 */
	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the mac addr filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	/* Only touch MAC_RX_MODE when the computed mode changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
9782
9783 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9784 {
9785         int i;
9786
9787         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9788                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9789 }
9790
9791 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9792 {
9793         int i;
9794
9795         if (!tg3_flag(tp, SUPPORT_MSIX))
9796                 return;
9797
9798         if (tp->rxq_cnt == 1) {
9799                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9800                 return;
9801         }
9802
9803         /* Validate table against current IRQ count */
9804         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9805                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9806                         break;
9807         }
9808
9809         if (i != TG3_RSS_INDIR_TBL_SIZE)
9810                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9811 }
9812
9813 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9814 {
9815         int i = 0;
9816         u32 reg = MAC_RSS_INDIR_TBL_0;
9817
9818         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9819                 u32 val = tp->rss_ind_tbl[i];
9820                 i++;
9821                 for (; i % 8; i++) {
9822                         val <<= 4;
9823                         val |= tp->rss_ind_tbl[i];
9824                 }
9825                 tw32(reg, val);
9826                 reg += 4;
9827         }
9828 }
9829
9830 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9831 {
9832         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9833                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9834         else
9835                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9836 }
9837
9838 /* tp->lock is held. */
9839 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9840 {
9841         u32 val, rdmac_mode;
9842         int i, err, limit;
9843         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9844
9845         tg3_disable_ints(tp);
9846
9847         tg3_stop_fw(tp);
9848
9849         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9850
9851         if (tg3_flag(tp, INIT_COMPLETE))
9852                 tg3_abort_hw(tp, 1);
9853
9854         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9855             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9856                 tg3_phy_pull_config(tp);
9857                 tg3_eee_pull_config(tp, NULL);
9858                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9859         }
9860
9861         /* Enable MAC control of LPI */
9862         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9863                 tg3_setup_eee(tp);
9864
9865         if (reset_phy)
9866                 tg3_phy_reset(tp);
9867
9868         err = tg3_chip_reset(tp);
9869         if (err)
9870                 return err;
9871
9872         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9873
9874         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9875                 val = tr32(TG3_CPMU_CTRL);
9876                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9877                 tw32(TG3_CPMU_CTRL, val);
9878
9879                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9880                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9881                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9882                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9883
9884                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9885                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9886                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9887                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9888
9889                 val = tr32(TG3_CPMU_HST_ACC);
9890                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9891                 val |= CPMU_HST_ACC_MACCLK_6_25;
9892                 tw32(TG3_CPMU_HST_ACC, val);
9893         }
9894
9895         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9896                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9897                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9898                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9899                 tw32(PCIE_PWR_MGMT_THRESH, val);
9900
9901                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9902                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9903
9904                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9905
9906                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9907                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9908         }
9909
9910         if (tg3_flag(tp, L1PLLPD_EN)) {
9911                 u32 grc_mode = tr32(GRC_MODE);
9912
9913                 /* Access the lower 1K of PL PCIE block registers. */
9914                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9915                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9916
9917                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9918                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9919                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9920
9921                 tw32(GRC_MODE, grc_mode);
9922         }
9923
9924         if (tg3_flag(tp, 57765_CLASS)) {
9925                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9926                         u32 grc_mode = tr32(GRC_MODE);
9927
9928                         /* Access the lower 1K of PL PCIE block registers. */
9929                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9930                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9931
9932                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9933                                    TG3_PCIE_PL_LO_PHYCTL5);
9934                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9935                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9936
9937                         tw32(GRC_MODE, grc_mode);
9938                 }
9939
9940                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9941                         u32 grc_mode;
9942
9943                         /* Fix transmit hangs */
9944                         val = tr32(TG3_CPMU_PADRNG_CTL);
9945                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9946                         tw32(TG3_CPMU_PADRNG_CTL, val);
9947
9948                         grc_mode = tr32(GRC_MODE);
9949
9950                         /* Access the lower 1K of DL PCIE block registers. */
9951                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9952                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9953
9954                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9955                                    TG3_PCIE_DL_LO_FTSMAX);
9956                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9957                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9958                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9959
9960                         tw32(GRC_MODE, grc_mode);
9961                 }
9962
9963                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9964                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9965                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9966                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9967         }
9968
9969         /* This works around an issue with Athlon chipsets on
9970          * B3 tigon3 silicon.  This bit has no effect on any
9971          * other revision.  But do not set this on PCI Express
9972          * chips and don't even touch the clocks if the CPMU is present.
9973          */
9974         if (!tg3_flag(tp, CPMU_PRESENT)) {
9975                 if (!tg3_flag(tp, PCI_EXPRESS))
9976                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9977                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9978         }
9979
9980         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9981             tg3_flag(tp, PCIX_MODE)) {
9982                 val = tr32(TG3PCI_PCISTATE);
9983                 val |= PCISTATE_RETRY_SAME_DMA;
9984                 tw32(TG3PCI_PCISTATE, val);
9985         }
9986
9987         if (tg3_flag(tp, ENABLE_APE)) {
9988                 /* Allow reads and writes to the
9989                  * APE register and memory space.
9990                  */
9991                 val = tr32(TG3PCI_PCISTATE);
9992                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9993                        PCISTATE_ALLOW_APE_SHMEM_WR |
9994                        PCISTATE_ALLOW_APE_PSPACE_WR;
9995                 tw32(TG3PCI_PCISTATE, val);
9996         }
9997
9998         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9999                 /* Enable some hw fixes.  */
10000                 val = tr32(TG3PCI_MSI_DATA);
10001                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10002                 tw32(TG3PCI_MSI_DATA, val);
10003         }
10004
10005         /* Descriptor ring init may make accesses to the
10006          * NIC SRAM area to setup the TX descriptors, so we
10007          * can only do this after the hardware has been
10008          * successfully reset.
10009          */
10010         err = tg3_init_rings(tp);
10011         if (err)
10012                 return err;
10013
10014         if (tg3_flag(tp, 57765_PLUS)) {
10015                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10016                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10017                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10018                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10019                 if (!tg3_flag(tp, 57765_CLASS) &&
10020                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10021                     tg3_asic_rev(tp) != ASIC_REV_5762)
10022                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10023                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10024         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10025                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10026                 /* This value is determined during the probe time DMA
10027                  * engine test, tg3_test_dma.
10028                  */
10029                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10030         }
10031
10032         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10033                           GRC_MODE_4X_NIC_SEND_RINGS |
10034                           GRC_MODE_NO_TX_PHDR_CSUM |
10035                           GRC_MODE_NO_RX_PHDR_CSUM);
10036         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10037
10038         /* Pseudo-header checksum is done by hardware logic and not
10039          * the offload processers, so make the chip do the pseudo-
10040          * header checksums on receive.  For transmit it is more
10041          * convenient to do the pseudo-header checksum in software
10042          * as Linux does that on transmit for us in all cases.
10043          */
10044         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10045
10046         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10047         if (tp->rxptpctl)
10048                 tw32(TG3_RX_PTP_CTL,
10049                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10050
10051         if (tg3_flag(tp, PTP_CAPABLE))
10052                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10053
10054         tw32(GRC_MODE, tp->grc_mode | val);
10055
10056         /* On one of the AMD platform, MRRS is restricted to 4000 because of
10057          * south bridge limitation. As a workaround, Driver is setting MRRS
10058          * to 2048 instead of default 4096.
10059          */
10060         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10061             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10062                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10063                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10064         }
10065
10066         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
10067         val = tr32(GRC_MISC_CFG);
10068         val &= ~0xff;
10069         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10070         tw32(GRC_MISC_CFG, val);
10071
10072         /* Initialize MBUF/DESC pool. */
10073         if (tg3_flag(tp, 5750_PLUS)) {
10074                 /* Do nothing.  */
10075         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10076                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10077                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10078                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10079                 else
10080                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10081                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10082                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10083         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10084                 int fw_len;
10085
10086                 fw_len = tp->fw_len;
10087                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10088                 tw32(BUFMGR_MB_POOL_ADDR,
10089                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10090                 tw32(BUFMGR_MB_POOL_SIZE,
10091                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10092         }
10093
10094         if (tp->dev->mtu <= ETH_DATA_LEN) {
10095                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10096                      tp->bufmgr_config.mbuf_read_dma_low_water);
10097                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10098                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10099                 tw32(BUFMGR_MB_HIGH_WATER,
10100                      tp->bufmgr_config.mbuf_high_water);
10101         } else {
10102                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10103                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10104                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10105                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10106                 tw32(BUFMGR_MB_HIGH_WATER,
10107                      tp->bufmgr_config.mbuf_high_water_jumbo);
10108         }
10109         tw32(BUFMGR_DMA_LOW_WATER,
10110              tp->bufmgr_config.dma_low_water);
10111         tw32(BUFMGR_DMA_HIGH_WATER,
10112              tp->bufmgr_config.dma_high_water);
10113
10114         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10115         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10116                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10117         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10118             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10119             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10120             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10121                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10122         tw32(BUFMGR_MODE, val);
10123         for (i = 0; i < 2000; i++) {
10124                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10125                         break;
10126                 udelay(10);
10127         }
10128         if (i >= 2000) {
10129                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10130                 return -ENODEV;
10131         }
10132
10133         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10134                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10135
10136         tg3_setup_rxbd_thresholds(tp);
10137
10138         /* Initialize TG3_BDINFO's at:
10139          *  RCVDBDI_STD_BD:     standard eth size rx ring
10140          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10141          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10142          *
10143          * like so:
10144          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10145          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10146          *                              ring attribute flags
10147          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10148          *
10149          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10150          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10151          *
10152          * The size of each ring is fixed in the firmware, but the location is
10153          * configurable.
10154          */
10155         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10156              ((u64) tpr->rx_std_mapping >> 32));
10157         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10158              ((u64) tpr->rx_std_mapping & 0xffffffff));
10159         if (!tg3_flag(tp, 5717_PLUS))
10160                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10161                      NIC_SRAM_RX_BUFFER_DESC);
10162
10163         /* Disable the mini ring */
10164         if (!tg3_flag(tp, 5705_PLUS))
10165                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10166                      BDINFO_FLAGS_DISABLED);
10167
10168         /* Program the jumbo buffer descriptor ring control
10169          * blocks on those devices that have them.
10170          */
10171         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10172             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10173
10174                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10175                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10176                              ((u64) tpr->rx_jmb_mapping >> 32));
10177                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10178                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10179                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10180                               BDINFO_FLAGS_MAXLEN_SHIFT;
10181                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10182                              val | BDINFO_FLAGS_USE_EXT_RECV);
10183                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10184                             tg3_flag(tp, 57765_CLASS) ||
10185                             tg3_asic_rev(tp) == ASIC_REV_5762)
10186                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10187                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10188                 } else {
10189                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10190                              BDINFO_FLAGS_DISABLED);
10191                 }
10192
10193                 if (tg3_flag(tp, 57765_PLUS)) {
10194                         val = TG3_RX_STD_RING_SIZE(tp);
10195                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10196                         val |= (TG3_RX_STD_DMA_SZ << 2);
10197                 } else
10198                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10199         } else
10200                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10201
10202         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10203
10204         tpr->rx_std_prod_idx = tp->rx_pending;
10205         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10206
10207         tpr->rx_jmb_prod_idx =
10208                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10209         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10210
10211         tg3_rings_reset(tp);
10212
10213         /* Initialize MAC address and backoff seed. */
10214         __tg3_set_mac_addr(tp, false);
10215
10216         /* MTU + ethernet header + FCS + optional VLAN tag */
10217         tw32(MAC_RX_MTU_SIZE,
10218              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10219
10220         /* The slot time is changed by tg3_setup_phy if we
10221          * run at gigabit with half duplex.
10222          */
10223         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10224               (6 << TX_LENGTHS_IPG_SHIFT) |
10225               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10226
10227         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10228             tg3_asic_rev(tp) == ASIC_REV_5762)
10229                 val |= tr32(MAC_TX_LENGTHS) &
10230                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10231                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10232
10233         tw32(MAC_TX_LENGTHS, val);
10234
10235         /* Receive rules. */
10236         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10237         tw32(RCVLPC_CONFIG, 0x0181);
10238
10239         /* Calculate RDMAC_MODE setting early, we need it to determine
10240          * the RCVLPC_STATE_ENABLE mask.
10241          */
10242         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10243                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10244                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10245                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10246                       RDMAC_MODE_LNGREAD_ENAB);
10247
10248         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10249                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10250
10251         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10252             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10253             tg3_asic_rev(tp) == ASIC_REV_57780)
10254                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10255                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10256                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10257
10258         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10259             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10260                 if (tg3_flag(tp, TSO_CAPABLE) &&
10261                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10262                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10263                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10264                            !tg3_flag(tp, IS_5788)) {
10265                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10266                 }
10267         }
10268
10269         if (tg3_flag(tp, PCI_EXPRESS))
10270                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10271
10272         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10273                 tp->dma_limit = 0;
10274                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10275                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10276                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10277                 }
10278         }
10279
10280         if (tg3_flag(tp, HW_TSO_1) ||
10281             tg3_flag(tp, HW_TSO_2) ||
10282             tg3_flag(tp, HW_TSO_3))
10283                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10284
10285         if (tg3_flag(tp, 57765_PLUS) ||
10286             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10287             tg3_asic_rev(tp) == ASIC_REV_57780)
10288                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10289
10290         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10291             tg3_asic_rev(tp) == ASIC_REV_5762)
10292                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10293
10294         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10295             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10296             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10297             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10298             tg3_flag(tp, 57765_PLUS)) {
10299                 u32 tgtreg;
10300
10301                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10302                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10303                 else
10304                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10305
10306                 val = tr32(tgtreg);
10307                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10308                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10309                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10310                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10311                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10312                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10313                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10314                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10315                 }
10316                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10317         }
10318
10319         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10320             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10321             tg3_asic_rev(tp) == ASIC_REV_5762) {
10322                 u32 tgtreg;
10323
10324                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10325                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10326                 else
10327                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10328
10329                 val = tr32(tgtreg);
10330                 tw32(tgtreg, val |
10331                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10332                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10333         }
10334
10335         /* Receive/send statistics. */
10336         if (tg3_flag(tp, 5750_PLUS)) {
10337                 val = tr32(RCVLPC_STATS_ENABLE);
10338                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10339                 tw32(RCVLPC_STATS_ENABLE, val);
10340         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10341                    tg3_flag(tp, TSO_CAPABLE)) {
10342                 val = tr32(RCVLPC_STATS_ENABLE);
10343                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10344                 tw32(RCVLPC_STATS_ENABLE, val);
10345         } else {
10346                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10347         }
10348         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10349         tw32(SNDDATAI_STATSENAB, 0xffffff);
10350         tw32(SNDDATAI_STATSCTRL,
10351              (SNDDATAI_SCTRL_ENABLE |
10352               SNDDATAI_SCTRL_FASTUPD));
10353
10354         /* Setup host coalescing engine. */
10355         tw32(HOSTCC_MODE, 0);
10356         for (i = 0; i < 2000; i++) {
10357                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10358                         break;
10359                 udelay(10);
10360         }
10361
10362         __tg3_set_coalesce(tp, &tp->coal);
10363
10364         if (!tg3_flag(tp, 5705_PLUS)) {
10365                 /* Status/statistics block address.  See tg3_timer,
10366                  * the tg3_periodic_fetch_stats call there, and
10367                  * tg3_get_stats to see how this works for 5705/5750 chips.
10368                  */
10369                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10370                      ((u64) tp->stats_mapping >> 32));
10371                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10372                      ((u64) tp->stats_mapping & 0xffffffff));
10373                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10374
10375                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10376
10377                 /* Clear statistics and status block memory areas */
10378                 for (i = NIC_SRAM_STATS_BLK;
10379                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10380                      i += sizeof(u32)) {
10381                         tg3_write_mem(tp, i, 0);
10382                         udelay(40);
10383                 }
10384         }
10385
10386         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10387
10388         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10389         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10390         if (!tg3_flag(tp, 5705_PLUS))
10391                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10392
10393         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10394                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10395                 /* reset to prevent losing 1st rx packet intermittently */
10396                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10397                 udelay(10);
10398         }
10399
10400         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10401                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10402                         MAC_MODE_FHDE_ENABLE;
10403         if (tg3_flag(tp, ENABLE_APE))
10404                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10405         if (!tg3_flag(tp, 5705_PLUS) &&
10406             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10407             tg3_asic_rev(tp) != ASIC_REV_5700)
10408                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10409         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10410         udelay(40);
10411
10412         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10413          * If TG3_FLAG_IS_NIC is zero, we should read the
10414          * register to preserve the GPIO settings for LOMs. The GPIOs,
10415          * whether used as inputs or outputs, are set by boot code after
10416          * reset.
10417          */
10418         if (!tg3_flag(tp, IS_NIC)) {
10419                 u32 gpio_mask;
10420
10421                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10422                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10423                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10424
10425                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10426                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10427                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10428
10429                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10430                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10431
10432                 tp->grc_local_ctrl &= ~gpio_mask;
10433                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10434
10435                 /* GPIO1 must be driven high for eeprom write protect */
10436                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10437                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10438                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10439         }
10440         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10441         udelay(100);
10442
10443         if (tg3_flag(tp, USING_MSIX)) {
10444                 val = tr32(MSGINT_MODE);
10445                 val |= MSGINT_MODE_ENABLE;
10446                 if (tp->irq_cnt > 1)
10447                         val |= MSGINT_MODE_MULTIVEC_EN;
10448                 if (!tg3_flag(tp, 1SHOT_MSI))
10449                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10450                 tw32(MSGINT_MODE, val);
10451         }
10452
10453         if (!tg3_flag(tp, 5705_PLUS)) {
10454                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10455                 udelay(40);
10456         }
10457
10458         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10459                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10460                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10461                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10462                WDMAC_MODE_LNGREAD_ENAB);
10463
10464         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10465             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10466                 if (tg3_flag(tp, TSO_CAPABLE) &&
10467                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10468                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10469                         /* nothing */
10470                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10471                            !tg3_flag(tp, IS_5788)) {
10472                         val |= WDMAC_MODE_RX_ACCEL;
10473                 }
10474         }
10475
10476         /* Enable host coalescing bug fix */
10477         if (tg3_flag(tp, 5755_PLUS))
10478                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10479
10480         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10481                 val |= WDMAC_MODE_BURST_ALL_DATA;
10482
10483         tw32_f(WDMAC_MODE, val);
10484         udelay(40);
10485
10486         if (tg3_flag(tp, PCIX_MODE)) {
10487                 u16 pcix_cmd;
10488
10489                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10490                                      &pcix_cmd);
10491                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10492                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10493                         pcix_cmd |= PCI_X_CMD_READ_2K;
10494                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10495                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10496                         pcix_cmd |= PCI_X_CMD_READ_2K;
10497                 }
10498                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10499                                       pcix_cmd);
10500         }
10501
10502         tw32_f(RDMAC_MODE, rdmac_mode);
10503         udelay(40);
10504
10505         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10506             tg3_asic_rev(tp) == ASIC_REV_5720) {
10507                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10508                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10509                                 break;
10510                 }
10511                 if (i < TG3_NUM_RDMA_CHANNELS) {
10512                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10513                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10514                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10515                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10516                 }
10517         }
10518
10519         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10520         if (!tg3_flag(tp, 5705_PLUS))
10521                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10522
10523         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10524                 tw32(SNDDATAC_MODE,
10525                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10526         else
10527                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10528
10529         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10530         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10531         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10532         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10533                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10534         tw32(RCVDBDI_MODE, val);
10535         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10536         if (tg3_flag(tp, HW_TSO_1) ||
10537             tg3_flag(tp, HW_TSO_2) ||
10538             tg3_flag(tp, HW_TSO_3))
10539                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10540         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10541         if (tg3_flag(tp, ENABLE_TSS))
10542                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10543         tw32(SNDBDI_MODE, val);
10544         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10545
10546         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10547                 err = tg3_load_5701_a0_firmware_fix(tp);
10548                 if (err)
10549                         return err;
10550         }
10551
10552         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10553                 /* Ignore any errors for the firmware download. If download
10554                  * fails, the device will operate with EEE disabled
10555                  */
10556                 tg3_load_57766_firmware(tp);
10557         }
10558
10559         if (tg3_flag(tp, TSO_CAPABLE)) {
10560                 err = tg3_load_tso_firmware(tp);
10561                 if (err)
10562                         return err;
10563         }
10564
10565         tp->tx_mode = TX_MODE_ENABLE;
10566
10567         if (tg3_flag(tp, 5755_PLUS) ||
10568             tg3_asic_rev(tp) == ASIC_REV_5906)
10569                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10570
10571         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10572             tg3_asic_rev(tp) == ASIC_REV_5762) {
10573                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10574                 tp->tx_mode &= ~val;
10575                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10576         }
10577
10578         tw32_f(MAC_TX_MODE, tp->tx_mode);
10579         udelay(100);
10580
10581         if (tg3_flag(tp, ENABLE_RSS)) {
10582                 u32 rss_key[10];
10583
10584                 tg3_rss_write_indir_tbl(tp);
10585
10586                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10587
10588                 for (i = 0; i < 10 ; i++)
10589                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10590         }
10591
10592         tp->rx_mode = RX_MODE_ENABLE;
10593         if (tg3_flag(tp, 5755_PLUS))
10594                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10595
10596         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10597                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10598
10599         if (tg3_flag(tp, ENABLE_RSS))
10600                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10601                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10602                                RX_MODE_RSS_IPV6_HASH_EN |
10603                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10604                                RX_MODE_RSS_IPV4_HASH_EN |
10605                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10606
10607         tw32_f(MAC_RX_MODE, tp->rx_mode);
10608         udelay(10);
10609
10610         tw32(MAC_LED_CTRL, tp->led_ctrl);
10611
10612         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10613         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10614                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10615                 udelay(10);
10616         }
10617         tw32_f(MAC_RX_MODE, tp->rx_mode);
10618         udelay(10);
10619
10620         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10621                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10622                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10623                         /* Set drive transmission level to 1.2V  */
10624                         /* only if the signal pre-emphasis bit is not set  */
10625                         val = tr32(MAC_SERDES_CFG);
10626                         val &= 0xfffff000;
10627                         val |= 0x880;
10628                         tw32(MAC_SERDES_CFG, val);
10629                 }
10630                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10631                         tw32(MAC_SERDES_CFG, 0x616000);
10632         }
10633
10634         /* Prevent chip from dropping frames when flow control
10635          * is enabled.
10636          */
10637         if (tg3_flag(tp, 57765_CLASS))
10638                 val = 1;
10639         else
10640                 val = 2;
10641         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10642
10643         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10644             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10645                 /* Use hardware link auto-negotiation */
10646                 tg3_flag_set(tp, HW_AUTONEG);
10647         }
10648
10649         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10650             tg3_asic_rev(tp) == ASIC_REV_5714) {
10651                 u32 tmp;
10652
10653                 tmp = tr32(SERDES_RX_CTRL);
10654                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10655                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10656                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10657                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10658         }
10659
10660         if (!tg3_flag(tp, USE_PHYLIB)) {
10661                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10662                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10663
10664                 err = tg3_setup_phy(tp, false);
10665                 if (err)
10666                         return err;
10667
10668                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10669                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10670                         u32 tmp;
10671
10672                         /* Clear CRC stats. */
10673                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10674                                 tg3_writephy(tp, MII_TG3_TEST1,
10675                                              tmp | MII_TG3_TEST1_CRC_EN);
10676                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10677                         }
10678                 }
10679         }
10680
10681         __tg3_set_rx_mode(tp->dev);
10682
10683         /* Initialize receive rules. */
10684         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10685         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10686         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10687         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10688
10689         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10690                 limit = 8;
10691         else
10692                 limit = 16;
10693         if (tg3_flag(tp, ENABLE_ASF))
10694                 limit -= 4;
10695         switch (limit) {
10696         case 16:
10697                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10698         case 15:
10699                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10700         case 14:
10701                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10702         case 13:
10703                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10704         case 12:
10705                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10706         case 11:
10707                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10708         case 10:
10709                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10710         case 9:
10711                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10712         case 8:
10713                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10714         case 7:
10715                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10716         case 6:
10717                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10718         case 5:
10719                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10720         case 4:
10721                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10722         case 3:
10723                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10724         case 2:
10725         case 1:
10726
10727         default:
10728                 break;
10729         }
10730
10731         if (tg3_flag(tp, ENABLE_APE))
10732                 /* Write our heartbeat update interval to APE. */
10733                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10734                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10735
10736         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10737
10738         return 0;
10739 }
10740
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base so later accesses start
	 * from a known state.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10759
10760 #ifdef CONFIG_TIGON3_HWMON
10761 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10762 {
10763         int i;
10764
10765         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10766                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10767
10768                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10769                 off += len;
10770
10771                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10772                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10773                         memset(ocir, 0, TG3_OCIR_LEN);
10774         }
10775 }
10776
10777 /* sysfs attributes for hwmon */
10778 static ssize_t tg3_show_temp(struct device *dev,
10779                              struct device_attribute *devattr, char *buf)
10780 {
10781         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10782         struct tg3 *tp = dev_get_drvdata(dev);
10783         u32 temperature;
10784
10785         spin_lock_bh(&tp->lock);
10786         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10787                                 sizeof(temperature));
10788         spin_unlock_bh(&tp->lock);
10789         return sprintf(buf, "%u\n", temperature * 1000);
10790 }
10791
10792
/* One temperature channel exposed through hwmon: the current reading
 * plus its critical and maximum thresholds.  Each attribute's index is
 * the APE scratchpad offset passed to tg3_show_temp().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
10807
10808 static void tg3_hwmon_close(struct tg3 *tp)
10809 {
10810         if (tp->hwmon_dev) {
10811                 hwmon_device_unregister(tp->hwmon_dev);
10812                 tp->hwmon_dev = NULL;
10813         }
10814 }
10815
10816 static void tg3_hwmon_open(struct tg3 *tp)
10817 {
10818         int i;
10819         u32 size = 0;
10820         struct pci_dev *pdev = tp->pdev;
10821         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10822
10823         tg3_sd_scan_scratchpad(tp, ocirs);
10824
10825         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10826                 if (!ocirs[i].src_data_length)
10827                         continue;
10828
10829                 size += ocirs[i].src_hdr_length;
10830                 size += ocirs[i].src_data_length;
10831         }
10832
10833         if (!size)
10834                 return;
10835
10836         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10837                                                           tp, tg3_groups);
10838         if (IS_ERR(tp->hwmon_dev)) {
10839                 tp->hwmon_dev = NULL;
10840                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10841         }
10842 }
#else
/* CONFIG_TIGON3_HWMON disabled: hwmon setup/teardown become no-ops. */
static inline void tg3_hwmon_close(struct tg3 *tp) { }
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif /* CONFIG_TIGON3_HWMON */
10847
10848
/* Add the current value of 32-bit statistics register REG into the
 * 64-bit counter PSTAT (a high/low u32 pair).  If ->low wraps (the sum
 * ends up smaller than the addend), carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10855
/* Fold the MAC TX/RX and RCVLPC hardware statistics registers into the
 * software statistics block.  Called from the once-per-second section
 * of tg3_timer() with tp->lock held; skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720 RDMA workaround: once more than TG3_NUM_RDMA_CHANNELS
	 * packets have been transmitted, the workaround bit set at init
	 * time is no longer needed, so clear it and the tracking flag.
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips, derive a discard event from the mbuf
		 * low-watermark attention bit instead of the discard
		 * counter register, acknowledging the attention after
		 * counting it.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10921
10922 static void tg3_chk_missed_msi(struct tg3 *tp)
10923 {
10924         u32 i;
10925
10926         for (i = 0; i < tp->irq_cnt; i++) {
10927                 struct tg3_napi *tnapi = &tp->napi[i];
10928
10929                 if (tg3_has_work(tnapi)) {
10930                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10931                             tnapi->last_tx_cons == tnapi->tx_cons) {
10932                                 if (tnapi->chk_msi_cnt < 1) {
10933                                         tnapi->chk_msi_cnt++;
10934                                         return;
10935                                 }
10936                                 tg3_msi(0, tnapi);
10937                         }
10938                 }
10939                 tnapi->chk_msi_cnt = 0;
10940                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10941                 tnapi->last_tx_cons = tnapi->tx_cons;
10942         }
10943 }
10944
/* Periodic housekeeping timer (period tp->timer_offset, see
 * tg3_timer_init).  Under tp->lock it checks for missed MSIs, polls
 * link state, fetches statistics once per second and sends the ASF
 * heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds, then re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	/* Skip this tick while an IRQ sync or reset task is in flight. */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
		spin_unlock(&tp->lock);
		goto restart_timer;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* If the write DMA engine lost its enable bit, schedule
		 * the reset task to recover the chip.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Briefly drop the port mode bits to
				 * force the SERDES link state machine
				 * to restart before re-running setup.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
			u32 cpmu = tr32(TG3_CPMU_STATUS);
			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
					 TG3_CPMU_STATUS_LINK_MASK);

			if (link_up != tp->link_up)
				tg3_setup_phy(tp, false);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
11085
11086 static void tg3_timer_init(struct tg3 *tp)
11087 {
11088         if (tg3_flag(tp, TAGGED_STATUS) &&
11089             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11090             !tg3_flag(tp, 57765_CLASS))
11091                 tp->timer_offset = HZ;
11092         else
11093                 tp->timer_offset = HZ / 10;
11094
11095         BUG_ON(tp->timer_offset > HZ);
11096
11097         tp->timer_multiplier = (HZ / tp->timer_offset);
11098         tp->asf_multiplier = (HZ / tp->timer_offset) *
11099                              TG3_FW_UPDATE_FREQ_SEC;
11100
11101         init_timer(&tp->timer);
11102         tp->timer.data = (unsigned long) tp;
11103         tp->timer.function = tg3_timer;
11104 }
11105
11106 static void tg3_timer_start(struct tg3 *tp)
11107 {
11108         tp->asf_counter   = tp->asf_multiplier;
11109         tp->timer_counter = tp->timer_multiplier;
11110
11111         tp->timer.expires = jiffies + tp->timer_offset;
11112         add_timer(&tp->timer);
11113 }
11114
/* Deactivate the maintenance timer, waiting for a concurrently
 * running handler on another CPU to finish.  Must not be called from
 * the timer callback itself.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
        del_timer_sync(&tp->timer);
}
11119
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * the full lock is dropped around the sleeping dev_close() call and
 * reacquired before returning (hence the sparse annotations below).
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                netdev_err(tp->dev,
                           "Failed to re-initialize device, aborting\n");
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                /* dev_close() can sleep and takes its own locks, so
                 * drop the full lock around the teardown sequence.
                 */
                tg3_full_unlock(tp);
                tg3_timer_stop(tp);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
11143
/* Process-context worker that performs a full chip halt and
 * re-initialization (e.g. after a TX timeout).  Runs under the RTNL
 * lock to serialize against ndo_open/ndo_close; RESET_TASK_PENDING is
 * cleared on every exit path so a canceller never waits forever.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;

        rtnl_lock();
        tg3_full_lock(tp, 0);

        /* Device went down before the work ran; nothing to reset. */
        if (!netif_running(tp->dev)) {
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
                rtnl_unlock();
                return;
        }

        tg3_full_unlock(tp);

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                /* Switch to flushed mailbox writes for future TX/RX
                 * mailbox accesses — NOTE(review): presumably to work
                 * around write-posting reorder causing the TX hang;
                 * confirm against the MBOX_WRITE_REORDER users.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tg3_flag_set(tp, MBOX_WRITE_REORDER);
                tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                /* Clear this flag so that tg3_reset_task_cancel() will not
                 * call cancel_work_sync() and wait forever.
                 */
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                dev_close(tp->dev);
                goto out;
        }

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
        rtnl_unlock();
}
11199
11200 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11201 {
11202         irq_handler_t fn;
11203         unsigned long flags;
11204         char *name;
11205         struct tg3_napi *tnapi = &tp->napi[irq_num];
11206
11207         if (tp->irq_cnt == 1)
11208                 name = tp->dev->name;
11209         else {
11210                 name = &tnapi->irq_lbl[0];
11211                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11212                         snprintf(name, IFNAMSIZ,
11213                                  "%s-txrx-%d", tp->dev->name, irq_num);
11214                 else if (tnapi->tx_buffers)
11215                         snprintf(name, IFNAMSIZ,
11216                                  "%s-tx-%d", tp->dev->name, irq_num);
11217                 else if (tnapi->rx_rcb)
11218                         snprintf(name, IFNAMSIZ,
11219                                  "%s-rx-%d", tp->dev->name, irq_num);
11220                 else
11221                         snprintf(name, IFNAMSIZ,
11222                                  "%s-%d", tp->dev->name, irq_num);
11223                 name[IFNAMSIZ-1] = 0;
11224         }
11225
11226         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11227                 fn = tg3_msi;
11228                 if (tg3_flag(tp, 1SHOT_MSI))
11229                         fn = tg3_msi_1shot;
11230                 flags = 0;
11231         } else {
11232                 fn = tg3_interrupt;
11233                 if (tg3_flag(tp, TAGGED_STATUS))
11234                         fn = tg3_interrupt_tagged;
11235                 flags = IRQF_SHARED;
11236         }
11237
11238         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11239 }
11240
/* Verify that the first vector can actually deliver an interrupt.
 * Temporarily swaps in a test ISR, forces a coalescing-engine event,
 * and polls up to ~50 ms for evidence of delivery.  Restores the
 * normal handler before returning.  Returns 0 if an interrupt was
 * seen, -EIO if not, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
        u32 val;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        /*
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }

        err = request_irq(tnapi->irq_vec, tg3_test_isr,
                          IRQF_SHARED, dev->name, tnapi);
        if (err)
                return err;

        tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Kick the coalescing engine so it raises an interrupt now. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               tnapi->coal_now);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                /* Either a non-zero interrupt mailbox or a masked PCI
                 * interrupt bit indicates the ISR ran.
                 */
                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                if (tg3_flag(tp, 57765_PLUS) &&
                    tnapi->hw_status->status_tag != tnapi->last_tag)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                msleep(10);
        }

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        /* Reinstall the real handler regardless of the test outcome. */
        err = tg3_request_irq(tp, 0);

        if (err)
                return err;

        if (intr_ok) {
                /* Reenable MSI one shot mode. */
                if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
                return 0;
        }

        return -EIO;
}
11314
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (SERR included). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, true);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
11375
11376 static int tg3_request_firmware(struct tg3 *tp)
11377 {
11378         const struct tg3_firmware_hdr *fw_hdr;
11379
11380         if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11381                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11382                            tp->fw_needed);
11383                 return -ENOENT;
11384         }
11385
11386         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11387
11388         /* Firmware blob starts with version numbers, followed by
11389          * start address and _full_ length including BSS sections
11390          * (which must be longer than the actual data, of course
11391          */
11392
11393         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11394         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11395                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11396                            tp->fw_len, tp->fw_needed);
11397                 release_firmware(tp->fw);
11398                 tp->fw = NULL;
11399                 return -EINVAL;
11400         }
11401
11402         /* We no longer need firmware; we have it. */
11403         tp->fw_needed = NULL;
11404         return 0;
11405 }
11406
11407 static u32 tg3_irq_count(struct tg3 *tp)
11408 {
11409         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11410
11411         if (irq_cnt > 1) {
11412                 /* We want as many rx rings enabled as there are cpus.
11413                  * In multiqueue MSI-X mode, the first MSI-X vector
11414                  * only deals with link interrupts, etc, so we add
11415                  * one to the number of vectors we are requesting.
11416                  */
11417                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11418         }
11419
11420         return irq_cnt;
11421 }
11422
/* Try to switch the device into MSI-X mode.  Sizes the RX/TX queue
 * counts (honoring user requests in txq_req/rxq_req), requests the
 * vectors, and degrades gracefully if fewer vectors are granted.
 * Returns true if MSI-X is in use on return, false to fall back to
 * MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc;
        struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

        tp->txq_cnt = tp->txq_req;
        tp->rxq_cnt = tp->rxq_req;
        if (!tp->rxq_cnt)
                tp->rxq_cnt = netif_get_num_default_rss_queues();
        if (tp->rxq_cnt > tp->rxq_max)
                tp->rxq_cnt = tp->rxq_max;

        /* Disable multiple TX rings by default.  Simple round-robin hardware
         * scheduling of the TX rings can cause starvation of rings with
         * small packets when other rings have TSO or jumbo packets.
         */
        if (!tp->txq_req)
                tp->txq_cnt = 1;

        tp->irq_cnt = tg3_irq_count(tp);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc < tp->irq_cnt) {
                /* Got fewer vectors than asked for; shrink the queue
                 * counts to match (one vector stays reserved for link
                 * interrupts).
                 */
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
                tp->rxq_cnt = max(rc - 1, 1);
                if (tp->txq_cnt)
                        tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        /* Only one vector granted: MSI-X works but RSS/TSS are moot. */
        if (tp->irq_cnt == 1)
                return true;

        tg3_flag_set(tp, ENABLE_RSS);

        if (tp->txq_cnt > 1)
                tg3_flag_set(tp, ENABLE_TSS);

        netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

        return true;
}
11481
/* Select and configure the interrupt mode: MSI-X if possible, else
 * MSI, else legacy INTx.  Programs MSGINT_MODE accordingly and, in
 * the single-vector / INTx fallback, forces the queue counts to 1.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                /* Without 1-shot MSI the chip must not auto-mask. */
                if (!tg3_flag(tp, 1SHOT_MSI))
                        msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
        }

        if (tp->irq_cnt == 1) {
                tp->txq_cnt = 1;
                tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
11520
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear all
 * interrupt-mode and RSS/TSS flags.  IRQ handlers must already have
 * been freed by the caller.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, USING_MSIX))
                pci_disable_msix(tp->pdev);
        else if (tg3_flag(tp, USING_MSI))
                pci_disable_msi(tp->pdev);
        tg3_flag_clear(tp, USING_MSI);
        tg3_flag_clear(tp, USING_MSIX);
        tg3_flag_clear(tp, ENABLE_RSS);
        tg3_flag_clear(tp, ENABLE_TSS);
}
11532
/* Bring the device fully up: configure interrupts, allocate DMA
 * rings and NAPI contexts, request IRQs, initialize the hardware,
 * then start the timer and TX queues.  @reset_phy/@init are passed
 * through to the hardware init paths; @test_irq runs the MSI
 * delivery test.  On failure, everything acquired so far is unwound
 * in reverse order through the out_* labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                     bool init)
{
        struct net_device *dev = tp->dev;
        int i, err;

        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
         */
        tg3_ints_init(tp);

        tg3_rss_check_indir_tbl(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                goto out_ints_fini;

        tg3_napi_init(tp);

        tg3_napi_enable(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                err = tg3_request_irq(tp, i);
                if (err) {
                        /* Free the vectors requested so far. */
                        for (i--; i >= 0; i--) {
                                struct tg3_napi *tnapi = &tp->napi[i];

                                free_irq(tnapi->irq_vec, tnapi);
                        }
                        goto out_napi_fini;
                }
        }

        tg3_full_lock(tp, 0);

        if (init)
                tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        }

        tg3_full_unlock(tp);

        if (err)
                goto out_free_irq;

        if (test_irq && tg3_flag(tp, USING_MSI)) {
                /* Verify MSI delivery actually works on this system. */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);

                        goto out_napi_fini;
                }

                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);

                        tw32(PCIE_TRANSACTION_CFG,
                             val | PCIE_TRANS_CFG_1SHOT_MSI);
                }
        }

        tg3_phy_start(tp);

        tg3_hwmon_open(tp);

        tg3_full_lock(tp, 0);

        tg3_timer_start(tp);
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);

        tg3_ptp_resume(tp);

        tg3_full_unlock(tp);

        netif_tx_start_all_queues(dev);

        /*
         * Reset loopback feature if it was turned on while the device was down
         * make sure that it's installed properly now.
         */
        if (dev->features & NETIF_F_LOOPBACK)
                tg3_set_loopback(dev, dev->features);

        return 0;

out_free_irq:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

out_napi_fini:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);

out_ints_fini:
        tg3_ints_fini(tp);

        return err;
}
11647
/* Tear down everything tg3_start() set up, in reverse order: cancel
 * pending reset work, stop the data path and timer, halt the chip,
 * then release IRQs, interrupt resources, NAPI contexts, and DMA
 * memory.
 */
static void tg3_stop(struct tg3 *tp)
{
        int i;

        tg3_reset_task_cancel(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_hwmon_close(tp);

        tg3_phy_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);
}
11682
/* ndo_open handler: load any required firmware (adjusting EEE/TSO
 * capability flags according to whether the load succeeded), power
 * the chip up, and bring the device up via tg3_start().  On start
 * failure the auxiliary power state is restored and the device is
 * put into D3hot.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->pcierr_recovery) {
                netdev_err(dev, "Failed to open device. PCI error recovery "
                           "in progress\n");
                return -EAGAIN;
        }

        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                        /* 57766: firmware gates EEE; toggle the
                         * capability flag to match availability.
                         */
                        if (err) {
                                netdev_warn(tp->dev, "EEE capability disabled\n");
                                tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
                        } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                                netdev_warn(tp->dev, "EEE capability restored\n");
                                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
                        }
                } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
                        /* 5701 A0 cannot run without its firmware. */
                        if (err)
                                return err;
                } else if (err) {
                        /* Other chips still work, just without TSO. */
                        netdev_warn(tp->dev, "TSO capability disabled\n");
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
                        tg3_flag_set(tp, TSO_CAPABLE);
                }
        }

        tg3_carrier_off(tp);

        err = tg3_power_up(tp);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        err = tg3_start(tp,
                        !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
                        true, true);
        if (err) {
                tg3_frob_aux_power(tp, false);
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }

        return err;
}
11739
/* ndo_stop handler: tear the device down and prepare it for low
 * power.  Register accesses are skipped if the device has dropped
 * off the bus (e.g. surprise removal).
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tp->pcierr_recovery) {
                netdev_err(dev, "Failed to close device. PCI error recovery "
                           "in progress\n");
                return -EAGAIN;
        }

        tg3_stop(tp);

        if (pci_device_is_present(tp->pdev)) {
                tg3_power_down_prepare(tp);

                tg3_carrier_off(tp);
        }
        return 0;
}
11759
11760 static inline u64 get_stat64(tg3_stat64_t *val)
11761 {
11762        return ((u64)val->high << 32) | ((u64)val->low);
11763 }
11764
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * parts the hardware FCS counter is unreliable, so the PHY's CRC
 * counter is read (and accumulated in software, since reading the
 * PHY register clears it); all other chips use the MAC statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
             tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 val;

                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the PHY CRC counter before reading it. */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
11788
/* Add the live hardware counter for @member to the snapshot saved at
 * the last reset, so ethtool stats stay monotonic across chip
 * resets.  Relies on the estats/old_estats/hw_stats locals declared
 * in tg3_get_estats().
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Fill @estats with the full ethtool statistics set: each entry is
 * the pre-reset snapshot plus the current hardware counter.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11876
/* Fill the standard netdev statistics from the hardware statistics
 * block, adding the snapshot saved at the last reset so the values
 * are monotonic.  Derived counters (rx_errors components, length
 * errors, CRC errors) are aggregated from several hardware fields.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on 5700/5701 parts. */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                tg3_calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software-maintained drop counters (not in the hw block). */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
}
11930
/* ethtool_ops::get_regs_len - size in bytes of the register dump
 * produced by tg3_get_regs().  Always the fixed legacy block size.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11935
/* ethtool_ops::get_regs - copy the legacy register block into _p.
 * The buffer is zeroed up front; if the PHY is in low-power mode the
 * registers are not safe to read, so the dump is left all-zero.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Device registers are unavailable while powered down */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11954
/* ethtool_ops::get_eeprom_len - size in bytes of the NVRAM exposed
 * through the ethtool EEPROM interface.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11961
/* ethtool_ops::get_eeprom - read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.
 *
 * NVRAM is accessed in 4-byte big-endian words, so the transfer is
 * split into three phases: an unaligned head word, the aligned bulk,
 * and an unaligned tail word.  eeprom->len is rebuilt incrementally so
 * the caller sees how many bytes were actually copied if a read fails
 * or is interrupted.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret, cpmu_restore = 0;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	/* Override clock, link aware and link idle modes */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		cpmu_val = tr32(TG3_CPMU_CTRL);
		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
				CPMU_CTRL_LINK_IDLE_MODE)) {
			tw32(TG3_CPMU_CTRL, cpmu_val &
					    ~(CPMU_CTRL_LINK_AWARE_MODE |
					     CPMU_CTRL_LINK_IDLE_MODE));
			cpmu_restore = 1;
		}
	}
	tg3_override_clk(tp);

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Back off one word before reporting the partial
			 * count.  NOTE(review): bytes up to i were copied,
			 * so this looks conservative - confirm intent
			 * before changing.
			 */
			if (i)
				i -= 4;
			eeprom->len += i;
			goto eeprom_done;
		}
		memcpy(pd + i, &val, 4);
		/* NVRAM reads are slow; yield periodically and abort if
		 * the calling task has been signalled.
		 */
		if (need_resched()) {
			if (signal_pending(current)) {
				eeprom->len += i;
				ret = -EINTR;
				goto eeprom_done;
			}
			cond_resched();
		}
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			goto eeprom_done;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	ret = 0;

eeprom_done:
	/* Restore clock, link aware and link idle modes */
	tg3_restore_clk(tp);
	if (cpmu_restore)
		tw32(TG3_CPMU_CTRL, cpmu_val);

	return ret;
}
12052
/* ethtool_ops::set_eeprom - write eeprom->len bytes from @data to NVRAM
 * at eeprom->offset.
 *
 * NVRAM writes must cover whole 4-byte words.  When the request is
 * unaligned at either end, the bordering words are first read back
 * (@start / @end) and merged with the caller's bytes in a temporary
 * buffer, i.e. a read-modify-write of the boundary words.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start = 0, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved boundary word(s) with the user data
		 * in a scratch buffer sized to the rounded length.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
12108
/* ethtool_ops::get_link_ksettings - report supported/advertised link
 * modes and the current speed/duplex/MDI-X state.
 *
 * When the PHY is managed through phylib, the query is delegated to
 * phy_ethtool_ksettings_get().  Otherwise the answer is assembled from
 * phy_flags (10/100-only, SerDes) and link_config.
 */
static int tg3_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 supported, advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		phy_ethtool_ksettings_get(phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		supported |= (SUPPORTED_1000baseT_Half |
			      SUPPORTED_1000baseT_Full);

	/* Copper parts also do 10/100 over twisted pair; SerDes parts
	 * report fibre only.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		supported |= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_TP);
		cmd->base.port = PORT_TP;
	} else {
		supported |= SUPPORTED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	/* Fold the configured flow-control bits into the advertised
	 * pause modes when pause autonegotiation is enabled.
	 */
	advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				advertising |= ADVERTISED_Pause;
			} else {
				advertising |= ADVERTISED_Pause |
					ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			advertising |= ADVERTISED_Asym_Pause;
		}
	}
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	/* Live speed/duplex/MDI-X values are only meaningful while the
	 * interface is up and link is established.
	 */
	if (netif_running(dev) && tp->link_up) {
		cmd->base.speed = tp->link_config.active_speed;
		cmd->base.duplex = tp->link_config.active_duplex;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising,
			tp->link_config.rmt_adv);

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->base.phy_address = tp->phy_addr;
	cmd->base.autoneg = tp->link_config.autoneg;
	return 0;
}
12183
/* ethtool_ops::set_link_ksettings - apply user-requested autoneg,
 * speed, duplex and advertising settings.
 *
 * phylib-managed PHYs are handed to phy_ethtool_ksettings_set().
 * Otherwise the request is validated against the device's abilities
 * (10/100-only and SerDes restrictions), stored in link_config under
 * the full lock, and - if the interface is running - pushed to the
 * hardware via tg3_setup_phy().
 *
 * Returns 0 on success or -EINVAL for an unsupported combination.
 */
static int tg3_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_ethtool_ksettings_set(phydev, cmd);
	}

	if (cmd->base.autoneg != AUTONEG_ENABLE &&
	    cmd->base.autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced mode needs an explicit duplex setting */
	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    cmd->base.duplex != DUPLEX_FULL &&
	    cmd->base.duplex != DUPLEX_HALF)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisable bits for this device
		 * and reject anything outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for link_config */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		advertising &= mask;
	} else {
		/* Forced mode: SerDes parts only support 1000/full */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->base.duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->base.autoneg;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->base.duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12279
/* ethtool_ops::get_drvinfo - fill in driver name/version, firmware
 * version string and the PCI bus address of the device.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
12289
12290 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12291 {
12292         struct tg3 *tp = netdev_priv(dev);
12293
12294         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12295                 wol->supported = WAKE_MAGIC;
12296         else
12297                 wol->supported = 0;
12298         wol->wolopts = 0;
12299         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12300                 wol->wolopts = WAKE_MAGIC;
12301         memset(&wol->sopass, 0, sizeof(wol->sopass));
12302 }
12303
/* ethtool_ops::set_wol - enable or disable magic-packet Wake-on-LAN.
 *
 * Rejects any wake option other than WAKE_MAGIC, and WAKE_MAGIC itself
 * when the chip or platform cannot support it.  The driver's
 * WOL_ENABLE flag is kept in sync with the device wakeup state.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Mirror the resulting wakeup state into the driver flag */
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
12324
/* ethtool_ops::get_msglevel - return the driver's message-enable mask */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
12330
/* ethtool_ops::set_msglevel - set the driver's message-enable mask */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
12336
/* ethtool_ops::nway_reset - restart link autonegotiation.
 *
 * Fails with -EAGAIN when the interface is down (or the phylib PHY is
 * not connected) and -EINVAL for SerDes PHYs.  In the non-phylib path
 * the restart is only issued when autoneg is enabled in BMCR or the
 * PHY is in parallel-detect mode.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice and the first result discarded.
		 * NOTE(review): presumably a deliberate dummy read -
		 * confirm against PHY errata before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12372
12373 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12374 {
12375         struct tg3 *tp = netdev_priv(dev);
12376
12377         ering->rx_max_pending = tp->rx_std_ring_mask;
12378         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12379                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12380         else
12381                 ering->rx_jumbo_max_pending = 0;
12382
12383         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12384
12385         ering->rx_pending = tp->rx_pending;
12386         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12387                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12388         else
12389                 ering->rx_jumbo_pending = 0;
12390
12391         ering->tx_pending = tp->napi[0].tx_pending;
12392 }
12393
/* ethtool_ops::set_ringparam - resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware limits (TX must
 * leave room for a maximally fragmented skb - three times that on
 * TSO_BUG chips), then stops the device, records the new sizes and
 * restarts the hardware.  Returns 0 or a negative errno from the
 * restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
	bool reset_phy = false;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;

	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Apply the TX size to every queue */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Reset PHY to avoid PHY lock up */
		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			reset_phy = true;

		err = tg3_restart_hw(tp, reset_phy);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12448
12449 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12450 {
12451         struct tg3 *tp = netdev_priv(dev);
12452
12453         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12454
12455         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12456                 epause->rx_pause = 1;
12457         else
12458                 epause->rx_pause = 0;
12459
12460         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12461                 epause->tx_pause = 1;
12462         else
12463                 epause->tx_pause = 0;
12464 }
12465
/* ethtool_ops::set_pauseparam - configure RX/TX flow control and
 * whether pause is autonegotiated.
 *
 * On phylib-managed PHYs the pause advertisement is updated on the PHY
 * and, when autoneg is active, a renegotiation is started so the link
 * partner learns the new settings.  Without phylib the flowctrl bits
 * are updated under the full lock and, if the device is running, the
 * hardware is halted and restarted to apply them.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
	bool reset_phy = false;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

		/* Asymmetric pause needs PHY support for it */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into the advertisement bits */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet; stash the advertisement
			 * for when it connects.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			/* Reset PHY to avoid PHY lock up */
			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
			    tg3_asic_rev(tp) == ASIC_REV_5720)
				reset_phy = true;

			err = tg3_restart_hw(tp, reset_phy);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12579
12580 static int tg3_get_sset_count(struct net_device *dev, int sset)
12581 {
12582         switch (sset) {
12583         case ETH_SS_TEST:
12584                 return TG3_NUM_TEST;
12585         case ETH_SS_STATS:
12586                 return TG3_NUM_STATS;
12587         default:
12588                 return -EOPNOTSUPP;
12589         }
12590 }
12591
12592 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12593                          u32 *rules __always_unused)
12594 {
12595         struct tg3 *tp = netdev_priv(dev);
12596
12597         if (!tg3_flag(tp, SUPPORT_MSIX))
12598                 return -EOPNOTSUPP;
12599
12600         switch (info->cmd) {
12601         case ETHTOOL_GRXRINGS:
12602                 if (netif_running(tp->dev))
12603                         info->data = tp->rxq_cnt;
12604                 else {
12605                         info->data = num_online_cpus();
12606                         if (info->data > TG3_RSS_MAX_NUM_QS)
12607                                 info->data = TG3_RSS_MAX_NUM_QS;
12608                 }
12609
12610                 return 0;
12611
12612         default:
12613                 return -EOPNOTSUPP;
12614         }
12615 }
12616
12617 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12618 {
12619         u32 size = 0;
12620         struct tg3 *tp = netdev_priv(dev);
12621
12622         if (tg3_flag(tp, SUPPORT_MSIX))
12623                 size = TG3_RSS_INDIR_TBL_SIZE;
12624
12625         return size;
12626 }
12627
/* ethtool_ops::get_rxfh - report the RSS hash function (always
 * Toeplitz) and copy out the RSS indirection table.  The hash key is
 * not readable, so @key is left untouched.
 */
static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	/* Element-wise copy widens the driver's table entries to u32 */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
12643
/* ethtool_ops::set_rxfh - update the RSS indirection table.
 *
 * Changing the hash key or selecting a hash function other than
 * Toeplitz is not supported.  The new table is written to hardware
 * immediately when RSS is active on a running device.
 */
static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
			const u8 hfunc)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	/* We require at least one supported parameter to be changed and no
	 * change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
12675
12676 static void tg3_get_channels(struct net_device *dev,
12677                              struct ethtool_channels *channel)
12678 {
12679         struct tg3 *tp = netdev_priv(dev);
12680         u32 deflt_qs = netif_get_num_default_rss_queues();
12681
12682         channel->max_rx = tp->rxq_max;
12683         channel->max_tx = tp->txq_max;
12684
12685         if (netif_running(dev)) {
12686                 channel->rx_count = tp->rxq_cnt;
12687                 channel->tx_count = tp->txq_cnt;
12688         } else {
12689                 if (tp->rxq_req)
12690                         channel->rx_count = tp->rxq_req;
12691                 else
12692                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12693
12694                 if (tp->txq_req)
12695                         channel->tx_count = tp->txq_req;
12696                 else
12697                         channel->tx_count = min(deflt_qs, tp->txq_max);
12698         }
12699 }
12700
/* ethtool_ops::set_channels - record the requested RX/TX queue counts.
 *
 * The values are range-checked against the hardware maxima and stored;
 * if the device is running it is stopped and restarted so the new
 * counts take effect immediately.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	/* Counts are applied at the next open if the device is down */
	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
12727
12728 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12729 {
12730         switch (stringset) {
12731         case ETH_SS_STATS:
12732                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12733                 break;
12734         case ETH_SS_TEST:
12735                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12736                 break;
12737         default:
12738                 WARN_ON(1);     /* we need a WARN() */
12739                 break;
12740         }
12741 }
12742
/* ethtool_ops::set_phys_id - blink the port LEDs so the user can
 * physically identify the adapter.
 *
 * Returns 1 from ETHTOOL_ID_ACTIVE to ask the ethtool core to drive a
 * one-second on/off cycle; ON/OFF force the LEDs via MAC_LED_CTRL
 * overrides, and INACTIVE restores the saved LED configuration.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force all speed/traffic LEDs on */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with everything off */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal LED operation */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
12777
12778 static void tg3_get_ethtool_stats(struct net_device *dev,
12779                                    struct ethtool_stats *estats, u64 *tmp_stats)
12780 {
12781         struct tg3 *tp = netdev_priv(dev);
12782
12783         if (tp->hw_stats)
12784                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12785         else
12786                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12787 }
12788
/* Read the device's Vital Product Data into a freshly kmalloc'd buffer.
 * For EEPROM-magic devices the VPD location/length is taken from an
 * extended-VPD directory entry in NVRAM (falling back to the fixed VPD
 * window); otherwise the data is read through the PCI VPD capability.
 * On success, sets *vpdlen to the buffer length and returns the buffer,
 * which the caller must kfree().  Returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended
		 * VPD entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is in longwords; the word
			 * at offset + 4 holds the data's NVRAM address.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No usable extended-VPD entry; use the fixed VPD area. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read through the PCI VPD capability, allowing up to
		 * three attempts so a transient -ETIMEDOUT or -EINTR
		 * (treated as zero progress) does not abort the read.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12864
12865 #define NVRAM_TEST_SIZE 0x100
12866 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12867 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12868 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12869 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12870 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12871 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12872 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12873 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12874
/* ethtool self-test: validate the NVRAM image.  The checksum scheme
 * depends on the image format identified by the magic word: a simple
 * byte checksum for self-boot format-1 images, per-byte parity bits
 * for self-boot HW images, and CRC-based bootstrap/manufacturing
 * checksums (plus the optional VPD RO checksum) for EEPROM images.
 * Returns 0 on success, negative errno on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Nothing to test when no NVRAM is attached. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes must be read for the checksum. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Self-boot format 1: size varies by revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown self-boot format: skip the test. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole image into memory, one word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Self-boot format 1: the byte sum over the image
		 * (including the stored checksum byte) must be zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		/* Self-boot HW format: bytes 0, 8 and 16/17 hold parity
		 * bits for the remaining data bytes.
		 */
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits per parity byte here. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* 6 bits from byte 16, 8 from byte 17. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must have
		 * odd weight: odd-weight data needs a clear parity bit,
		 * even-weight data needs it set.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* EEPROM format: verify the two CRC checksums. */
	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally, verify the VPD read-only section checksum, if the
	 * VPD block contains one.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			/* Byte sum from the start of the VPD through the
			 * stored checksum byte must be zero.
			 */
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
13053
13054 #define TG3_SERDES_TIMEOUT_SEC  2
13055 #define TG3_COPPER_TIMEOUT_SEC  6
13056
13057 static int tg3_test_link(struct tg3 *tp)
13058 {
13059         int i, max;
13060
13061         if (!netif_running(tp->dev))
13062                 return -ENODEV;
13063
13064         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13065                 max = TG3_SERDES_TIMEOUT_SEC;
13066         else
13067                 max = TG3_COPPER_TIMEOUT_SEC;
13068
13069         for (i = 0; i < max; i++) {
13070                 if (tp->link_up)
13071                         return 0;
13072
13073                 if (msleep_interruptible(1000))
13074                         break;
13075         }
13076
13077         return -EIO;
13078 }
13079
/* ethtool self-test: exercise the commonly used registers.  Each table
 * entry gives a register offset, applicability flags, a mask of
 * read-only bits and a mask of read/write bits.  The test writes all
 * zeros and then all ones through each register, checking that the
 * read-only bits never change and the read/write bits follow the
 * written value.  The original register value is restored afterwards.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip so flagged entries can be skipped. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	/* Restore the register before reporting the failure. */
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
13300
13301 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13302 {
13303         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13304         int i;
13305         u32 j;
13306
13307         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13308                 for (j = 0; j < len; j += 4) {
13309                         u32 val;
13310
13311                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13312                         tg3_read_mem(tp, offset + j, &val);
13313                         if (val != test_pattern[i])
13314                                 return -EIO;
13315                 }
13316         }
13317         return 0;
13318 }
13319
/* ethtool self-test: pattern-test the NIC-internal memory regions.
 * The offset/length table of testable regions depends on the chip
 * generation; the chain below is ordered from newest to oldest so
 * the most specific table wins.  An entry offset of 0xffffffff
 * terminates each table.  Returns 0 on success, -EIO on mismatch.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table matching this chip family. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	/* Stop at the first region that fails verification. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
13389
13390 #define TG3_TSO_MSS             500
13391
13392 #define TG3_TSO_IP_HDR_LEN      20
13393 #define TG3_TSO_TCP_HDR_LEN     20
13394 #define TG3_TSO_TCP_OPT_LEN     12
13395
/* Canned EtherType + IPv4 + TCP (with options) header used by the TSO
 * loopback test.  It is copied directly after the two MAC addresses of
 * the test frame; the IP total-length field is patched at run time.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* EtherType: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: ver/ihl=4/5, tos, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, frag flags/offset (DF set) */
0x40, 0x06, 0x00, 0x00,		/* IP: ttl=64, proto=6 (TCP), checksum=0 */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: dest 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source/dest ports */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,		/* TCP: doff=8 (32-byte hdr), flags, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp (kind 8) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp filler */
0x11, 0x11, 0x11, 0x11,
};
13412
13413 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13414 {
13415         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13416         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13417         u32 budget;
13418         struct sk_buff *skb;
13419         u8 *tx_data, *rx_data;
13420         dma_addr_t map;
13421         int num_pkts, tx_len, rx_len, i, err;
13422         struct tg3_rx_buffer_desc *desc;
13423         struct tg3_napi *tnapi, *rnapi;
13424         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13425
13426         tnapi = &tp->napi[0];
13427         rnapi = &tp->napi[0];
13428         if (tp->irq_cnt > 1) {
13429                 if (tg3_flag(tp, ENABLE_RSS))
13430                         rnapi = &tp->napi[1];
13431                 if (tg3_flag(tp, ENABLE_TSS))
13432                         tnapi = &tp->napi[1];
13433         }
13434         coal_now = tnapi->coal_now | rnapi->coal_now;
13435
13436         err = -EIO;
13437
13438         tx_len = pktsz;
13439         skb = netdev_alloc_skb(tp->dev, tx_len);
13440         if (!skb)
13441                 return -ENOMEM;
13442
13443         tx_data = skb_put(skb, tx_len);
13444         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13445         memset(tx_data + ETH_ALEN, 0x0, 8);
13446
13447         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13448
13449         if (tso_loopback) {
13450                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13451
13452                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13453                               TG3_TSO_TCP_OPT_LEN;
13454
13455                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13456                        sizeof(tg3_tso_header));
13457                 mss = TG3_TSO_MSS;
13458
13459                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13460                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13461
13462                 /* Set the total length field in the IP header */
13463                 iph->tot_len = htons((u16)(mss + hdr_len));
13464
13465                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13466                               TXD_FLAG_CPU_POST_DMA);
13467
13468                 if (tg3_flag(tp, HW_TSO_1) ||
13469                     tg3_flag(tp, HW_TSO_2) ||
13470                     tg3_flag(tp, HW_TSO_3)) {
13471                         struct tcphdr *th;
13472                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13473                         th = (struct tcphdr *)&tx_data[val];
13474                         th->check = 0;
13475                 } else
13476                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13477
13478                 if (tg3_flag(tp, HW_TSO_3)) {
13479                         mss |= (hdr_len & 0xc) << 12;
13480                         if (hdr_len & 0x10)
13481                                 base_flags |= 0x00000010;
13482                         base_flags |= (hdr_len & 0x3e0) << 5;
13483                 } else if (tg3_flag(tp, HW_TSO_2))
13484                         mss |= hdr_len << 9;
13485                 else if (tg3_flag(tp, HW_TSO_1) ||
13486                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13487                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13488                 } else {
13489                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13490                 }
13491
13492                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13493         } else {
13494                 num_pkts = 1;
13495                 data_off = ETH_HLEN;
13496
13497                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13498                     tx_len > VLAN_ETH_FRAME_LEN)
13499                         base_flags |= TXD_FLAG_JMB_PKT;
13500         }
13501
13502         for (i = data_off; i < tx_len; i++)
13503                 tx_data[i] = (u8) (i & 0xff);
13504
13505         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13506         if (pci_dma_mapping_error(tp->pdev, map)) {
13507                 dev_kfree_skb(skb);
13508                 return -EIO;
13509         }
13510
13511         val = tnapi->tx_prod;
13512         tnapi->tx_buffers[val].skb = skb;
13513         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13514
13515         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13516                rnapi->coal_now);
13517
13518         udelay(10);
13519
13520         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13521
13522         budget = tg3_tx_avail(tnapi);
13523         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13524                             base_flags | TXD_FLAG_END, mss, 0)) {
13525                 tnapi->tx_buffers[val].skb = NULL;
13526                 dev_kfree_skb(skb);
13527                 return -EIO;
13528         }
13529
13530         tnapi->tx_prod++;
13531
13532         /* Sync BD data before updating mailbox */
13533         wmb();
13534
13535         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13536         tr32_mailbox(tnapi->prodmbox);
13537
13538         udelay(10);
13539
13540         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13541         for (i = 0; i < 35; i++) {
13542                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13543                        coal_now);
13544
13545                 udelay(10);
13546
13547                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13548                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13549                 if ((tx_idx == tnapi->tx_prod) &&
13550                     (rx_idx == (rx_start_idx + num_pkts)))
13551                         break;
13552         }
13553
13554         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13555         dev_kfree_skb(skb);
13556
13557         if (tx_idx != tnapi->tx_prod)
13558                 goto out;
13559
13560         if (rx_idx != rx_start_idx + num_pkts)
13561                 goto out;
13562
13563         val = data_off;
13564         while (rx_idx != rx_start_idx) {
13565                 desc = &rnapi->rx_rcb[rx_start_idx++];
13566                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13567                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13568
13569                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13570                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13571                         goto out;
13572
13573                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13574                          - ETH_FCS_LEN;
13575
13576                 if (!tso_loopback) {
13577                         if (rx_len != tx_len)
13578                                 goto out;
13579
13580                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13581                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13582                                         goto out;
13583                         } else {
13584                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13585                                         goto out;
13586                         }
13587                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13588                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13589                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13590                         goto out;
13591                 }
13592
13593                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13594                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13595                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13596                                              mapping);
13597                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13598                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13599                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13600                                              mapping);
13601                 } else
13602                         goto out;
13603
13604                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13605                                             PCI_DMA_FROMDEVICE);
13606
13607                 rx_data += TG3_RX_OFFSET(tp);
13608                 for (i = data_off; i < rx_len; i++, val++) {
13609                         if (*(rx_data + i) != (u8) (val & 0xff))
13610                                 goto out;
13611                 }
13612         }
13613
13614         err = 0;
13615
13616         /* tg3_free_rings will unmap and free the rx_data */
13617 out:
13618         return err;
13619 }
13620
13621 #define TG3_STD_LOOPBACK_FAILED         1
13622 #define TG3_JMB_LOOPBACK_FAILED         2
13623 #define TG3_TSO_LOOPBACK_FAILED         4
13624 #define TG3_LOOPBACK_FAILED \
13625         (TG3_STD_LOOPBACK_FAILED | \
13626          TG3_JMB_LOOPBACK_FAILED | \
13627          TG3_TSO_LOOPBACK_FAILED)
13628
/* Run the MAC, PHY and (optionally) external-cable loopback self tests.
 *
 * @tp:         device private state
 * @data:       ethtool self-test result array; the TG3_MAC/PHY/EXT_LOOPB_TEST
 *              slots are OR-ed with TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external loopback variants
 *
 * Resets the hardware via tg3_reset_hw() before testing.  Returns 0 when
 * every executed loopback passed, -EIO otherwise (also when the interface
 * is not running or the reset fails, in which case all tested modes are
 * marked failed).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test packet to the DMA limit when one is set. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Mask off the EEE capability for the duration of the test; it is
	 * restored at "done" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13743
/* ethtool ->self_test handler.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, recording per-test pass/fail in @data
 * (one u64 per TG3_*_TEST slot) and setting ETH_TEST_FL_FAILED in
 * etest->flags on any failure.  Offline tests halt the hardware and
 * restart it afterwards; the netif is stopped/restarted around them.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Temporarily power up a sleeping device for the test run;
	 * if that fails, mark every test failed and bail out.
	 */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Skip the link test when external loopback was requested. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before destructive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only unlock NVRAM if we locked it above. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it and hw came back. */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13832
/* SIOCSHWTSTAMP handler: validate and apply a hardware timestamping
 * configuration copied from userspace.  Maps each supported rx_filter
 * to the matching TG3_RX_PTP_CTL_* bits, programs the RX PTP control
 * register when the interface is running, and enables/disables TX
 * timestamping.  Returns 0 on success, or -EOPNOTSUPP (no PTP support),
 * -EFAULT (copy to/from user failed), -EINVAL (reserved flags set) or
 * -ERANGE (unsupported tx_type/rx_filter).
 */
static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* hwtstamp_config.flags is reserved; reject any nonzero value. */
	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	/* Translate the requested RX filter into PTP control bits. */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* Only touch the hardware register when the device is up and a
	 * filter is actually enabled.
	 */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	/* Echo the accepted configuration back to userspace. */
	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13919
/* SIOCGHWTSTAMP handler: report the current hardware timestamping
 * configuration to userspace by reverse-mapping tp->rxptpctl back to
 * an rx_filter value.  The WARN_ON_ONCE default can only trigger if
 * rxptpctl holds a bit combination this driver never sets.
 */
static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	/* Inverse of the mapping in tg3_hwtstamp_set(). */
	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13980
/* ndo_do_ioctl handler.  When phylib manages the PHY, MII ioctls are
 * forwarded to the PHY driver; otherwise MII register reads/writes are
 * serviced directly under tp->lock.  Also dispatches the hardware
 * timestamping get/set ioctls.  Unknown commands fall through to
 * -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes access to the MII registers. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
14045
14046 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14047 {
14048         struct tg3 *tp = netdev_priv(dev);
14049
14050         memcpy(ec, &tp->coal, sizeof(*ec));
14051         return 0;
14052 }
14053
/* ethtool ->set_coalesce handler.  Validates the requested interrupt
 * coalescing parameters against chip limits and, if valid, caches the
 * relevant fields in tp->coal and programs the hardware when the
 * interface is running.  Returns 0 or -EINVAL.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* On 5705-plus chips the *_irq and stats-block limits stay zero,
	 * which rejects any nonzero value for those parameters below.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Apply immediately when the device is up. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
14099
/* ethtool ->set_eee handler.  Accepts a new Energy-Efficient Ethernet
 * configuration, rejecting changes to the advertisement mask and
 * out-of-range Tx LPI timers.  Reconfigures EEE and resets the PHY
 * when the interface is running.  Returns 0, -EOPNOTSUPP or -EINVAL.
 */
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	/* The advertisement mask is derived by the driver; userspace may
	 * not change it directly.
	 */
	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	/* Mark the config user-driven; this may flap a management link. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}
14136
14137 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14138 {
14139         struct tg3 *tp = netdev_priv(dev);
14140
14141         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14142                 netdev_warn(tp->dev,
14143                             "Board does not support EEE!\n");
14144                 return -EOPNOTSUPP;
14145         }
14146
14147         *edata = tp->eee;
14148         return 0;
14149 }
14150
/* ethtool operations table; handlers are the tg3_* functions above. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
14187
14188 static void tg3_get_stats64(struct net_device *dev,
14189                             struct rtnl_link_stats64 *stats)
14190 {
14191         struct tg3 *tp = netdev_priv(dev);
14192
14193         spin_lock_bh(&tp->lock);
14194         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14195                 *stats = tp->net_stats_prev;
14196                 spin_unlock_bh(&tp->lock);
14197                 return;
14198         }
14199
14200         tg3_get_nstats(tp, stats);
14201         spin_unlock_bh(&tp->lock);
14202 }
14203
/* ndo_set_rx_mode handler: push the new RX filter configuration to the
 * hardware under the full lock.  A no-op while the device is down; the
 * mode is applied on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
14215
14216 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14217                                int new_mtu)
14218 {
14219         dev->mtu = new_mtu;
14220
14221         if (new_mtu > ETH_DATA_LEN) {
14222                 if (tg3_flag(tp, 5780_CLASS)) {
14223                         netdev_update_features(dev);
14224                         tg3_flag_clear(tp, TSO_CAPABLE);
14225                 } else {
14226                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14227                 }
14228         } else {
14229                 if (tg3_flag(tp, 5780_CLASS)) {
14230                         tg3_flag_set(tp, TSO_CAPABLE);
14231                         netdev_update_features(dev);
14232                 }
14233                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14234         }
14235 }
14236
/* ndo_change_mtu handler.  When the device is down, only records the
 * new MTU (applied at the next open).  Otherwise stops traffic, halts
 * the chip, applies the MTU and restarts the hardware, resetting the
 * PHY on the chip revisions that require it.  Returns 0 or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
14282
/* Net device callbacks for all tg3 devices; registered at probe time. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
14300
14301 static void tg3_get_eeprom_size(struct tg3 *tp)
14302 {
14303         u32 cursize, val, magic;
14304
14305         tp->nvram_size = EEPROM_CHIP_SIZE;
14306
14307         if (tg3_nvram_read(tp, 0, &magic) != 0)
14308                 return;
14309
14310         if ((magic != TG3_EEPROM_MAGIC) &&
14311             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14312             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14313                 return;
14314
14315         /*
14316          * Size the chip by reading offsets at increasing powers of two.
14317          * When we encounter our validation signature, we know the addressing
14318          * has wrapped around, and thus have our chip size.
14319          */
14320         cursize = 0x10;
14321
14322         while (cursize < tp->nvram_size) {
14323                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14324                         return;
14325
14326                 if (val == magic)
14327                         break;
14328
14329                 cursize <<= 1;
14330         }
14331
14332         tp->nvram_size = cursize;
14333 }
14334
14335 static void tg3_get_nvram_size(struct tg3 *tp)
14336 {
14337         u32 val;
14338
14339         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14340                 return;
14341
14342         /* Selfboot format */
14343         if (val != TG3_EEPROM_MAGIC) {
14344                 tg3_get_eeprom_size(tp);
14345                 return;
14346         }
14347
14348         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14349                 if (val != 0) {
14350                         /* This is confusing.  We want to operate on the
14351                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14352                          * call will read from NVRAM and byteswap the data
14353                          * according to the byteswapping settings for all
14354                          * other register accesses.  This ensures the data we
14355                          * want will always reside in the lower 16-bits.
14356                          * However, the data in NVRAM is in LE format, which
14357                          * means the data from the NVRAM read will always be
14358                          * opposite the endianness of the CPU.  The 16-bit
14359                          * byteswap then brings the data to CPU endianness.
14360                          */
14361                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14362                         return;
14363                 }
14364         }
14365         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14366 }
14367
/* Probe NVRAM type for pre-5752 devices.
 *
 * Reads NVRAM_CFG1 to determine whether a flash interface is wired up
 * and, for 5750/5780-class chips, decodes the vendor strap bits to
 * select the JEDEC vendor id, page size and buffering mode.  All
 * other chips default to a buffered Atmel AT45DB0X1B.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compatibility bypass so
		 * the standard EEPROM access path is used.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Non-5750/5780 parts: fixed buffered Atmel flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14418
14419 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14420 {
14421         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14422         case FLASH_5752PAGE_SIZE_256:
14423                 tp->nvram_pagesize = 256;
14424                 break;
14425         case FLASH_5752PAGE_SIZE_512:
14426                 tp->nvram_pagesize = 512;
14427                 break;
14428         case FLASH_5752PAGE_SIZE_1K:
14429                 tp->nvram_pagesize = 1024;
14430                 break;
14431         case FLASH_5752PAGE_SIZE_2K:
14432                 tp->nvram_pagesize = 2048;
14433                 break;
14434         case FLASH_5752PAGE_SIZE_4K:
14435                 tp->nvram_pagesize = 4096;
14436                 break;
14437         case FLASH_5752PAGE_SIZE_264:
14438                 tp->nvram_pagesize = 264;
14439                 break;
14440         case FLASH_5752PAGE_SIZE_528:
14441                 tp->nvram_pagesize = 528;
14442                 break;
14443         }
14444 }
14445
/* Probe NVRAM type for 5752 devices from the NVRAM_CFG1 strap bits. */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		/* Flash parts encode their page size in NVRAM_CFG1. */
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14486
/* Probe NVRAM type and size for 5755 devices.
 *
 * When the TPM protection strap (bit 27 of NVRAM_CFG1) is set, part
 * of the flash is locked out, so the usable size reported below is
 * smaller than the physical part size.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Protected sizes are the accessible region below the
		 * TPM lockout; unprotected sizes are the full part.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14542
/* Probe NVRAM type for 5787/5784/5785 devices from NVRAM_CFG1. */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: use the whole chip as the "page" and
		 * disable the compatibility bypass.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14580
/* Probe NVRAM type and size for 5761 devices.
 *
 * When the TPM protection strap (bit 27 of NVRAM_CFG1) is set, the
 * usable size is read from the NVRAM_ADDR_LOCKOUT register instead of
 * being derived from the part's vendor id.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows from the specific part id. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14655
14656 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14657 {
14658         tp->nvram_jedecnum = JEDEC_ATMEL;
14659         tg3_flag_set(tp, NVRAM_BUFFERED);
14660         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14661 }
14662
/* Probe NVRAM type and size for 57780-class devices.
 *
 * EEPROM straps return early after disabling the compatibility
 * bypass; unrecognized straps mark the device as having no NVRAM.
 * Flash straps fall through to the common page-size decode at the
 * bottom.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size from the specific Atmel part id. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size from the specific ST part id. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14734
14735
/* Probe NVRAM type and size for 5717/5719 devices.
 *
 * Same structure as tg3_get_57780_nvram_info(): EEPROM straps return
 * early, unknown straps set NO_NVRAM, flash straps decode a size and
 * fall through to the common page-size handling.  Some part ids leave
 * nvram_size at 0 so the generic size probe runs later.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14813
/* Probe NVRAM type and size for 5720/5762 devices.
 *
 * On 5762 the pinstrap encoding differs slightly, so a few strap
 * values are first remapped onto their 5720 equivalents.  After the
 * common vendor decode, 5762 additionally validates the NVRAM
 * signature and sets NO_NVRAM when it is unrecognized.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Remap 5762-specific straps to 5720 equivalents. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave size 0 so it is probed later. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762 leave size 0 so it is probed later. */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation is only used for 264/528-byte pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		/* Sanity-check the NVRAM signature on 5762. */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14960
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Probe and initialize NVRAM/EEPROM access for the device: reset the
 * EEPROM state machine, enable serial-EEPROM access, then dispatch to
 * the ASIC-specific routine that fills in the NVRAM geometry and
 * flags, and finally determine the total NVRAM size.  On 5700/5701
 * only the legacy serial EEPROM path is set up.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM state machine and program its clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* Serialize against firmware NVRAM users before probing. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-specific NVRAM info routine. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Probe the size if the info routine did not set it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: legacy serial EEPROM only, no NVRAM. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
15035
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected on
 * that board.  Used as a last-resort lookup when the PHY cannot be
 * identified from the hardware ID registers or the EEPROM.  A phy_id
 * of 0 marks a board whose PHY is treated as serdes (see
 * tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
15040
/* Hardcoded board table consulted by tg3_lookup_by_subsys() when
 * neither the PHY ID registers nor the EEPROM yield a usable PHY ID.
 * Entries with a 0 phy_id are serdes boards.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
15104
15105 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15106 {
15107         int i;
15108
15109         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15110                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15111                      tp->pdev->subsystem_vendor) &&
15112                     (subsys_id_to_phy_id[i].subsys_devid ==
15113                      tp->pdev->subsystem_device))
15114                         return &subsys_id_to_phy_id[i];
15115         }
15116         return NULL;
15117 }
15118
/* Pull the hardware configuration out of the NIC's SRAM-resident
 * EEPROM shadow (NIC_SRAM_DATA_*) and latch it into tp: PHY ID,
 * serdes vs copper, LED mode, WOL capability, ASF/APE enablement and
 * assorted PHY workaround flags.  Finally propagates the WOL
 * capability to the PM core.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 keeps its config in the VCPU shadow register
		 * instead of NIC SRAM.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on newer ASICs and only when the
		 * bootcode version looks sane (0 < ver < 0x100).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the two SRAM PHY-ID halves into the driver's
		 * internal TG3_PHY_ID layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards override to PHY_2 LED mode. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Arima boards 0x205a/0x2063 keep the EEPROM
			 * writable despite advertising write-protect.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards without fiber-WOL support cannot wake. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	/* Tell the PM core whether this device can wake the system. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15335
/* Read one 32-bit word from the APE OTP area at word index @offset
 * (scaled by 8 into the OTP address space — presumably the per-word
 * stride; confirm against the APE register spec).  Holds the NVRAM
 * lock around the command sequence and polls up to ~1 ms (100 x 10 us)
 * for completion.  Returns 0 with *val filled in, -EBUSY on timeout,
 * or the tg3_nvram_lock() error.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Dummy read-back before the first status poll. */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Always de-assert the OTP control bits, even on timeout. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15368
15369 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15370 {
15371         int i;
15372         u32 val;
15373
15374         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15375         tw32(OTP_CTRL, cmd);
15376
15377         /* Wait for up to 1 ms for command to execute. */
15378         for (i = 0; i < 100; i++) {
15379                 val = tr32(OTP_STATUS);
15380                 if (val & OTP_STATUS_CMD_DONE)
15381                         break;
15382                 udelay(10);
15383         }
15384
15385         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15386 }
15387
15388 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15389  * configuration is a 32-bit value that straddles the alignment boundary.
15390  * We do two 32-bit reads and then shift and merge the results.
15391  */
15392 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15393 {
15394         u32 bhalf_otp, thalf_otp;
15395
15396         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15397
15398         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15399                 return 0;
15400
15401         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15402
15403         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15404                 return 0;
15405
15406         thalf_otp = tr32(OTP_READ_DATA);
15407
15408         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15409
15410         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15411                 return 0;
15412
15413         bhalf_otp = tr32(OTP_READ_DATA);
15414
15415         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15416 }
15417
15418 static void tg3_phy_init_link_config(struct tg3 *tp)
15419 {
15420         u32 adv = ADVERTISED_Autoneg;
15421
15422         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15423                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15424                         adv |= ADVERTISED_1000baseT_Half;
15425                 adv |= ADVERTISED_1000baseT_Full;
15426         }
15427
15428         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15429                 adv |= ADVERTISED_100baseT_Half |
15430                        ADVERTISED_100baseT_Full |
15431                        ADVERTISED_10baseT_Half |
15432                        ADVERTISED_10baseT_Full |
15433                        ADVERTISED_TP;
15434         else
15435                 adv |= ADVERTISED_FIBRE;
15436
15437         tp->link_config.advertising = adv;
15438         tp->link_config.speed = SPEED_UNKNOWN;
15439         tp->link_config.duplex = DUPLEX_UNKNOWN;
15440         tp->link_config.autoneg = AUTONEG_ENABLE;
15441         tp->link_config.active_speed = SPEED_UNKNOWN;
15442         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15443
15444         tp->old_link = -1;
15445 }
15446
/* Identify the PHY attached to the MAC and set up the initial link
 * configuration.  The PHY ID is sought in order from: the MII PHYSID
 * registers (skipped when ASF/APE firmware may own the PHY), the ID
 * cached from the EEPROM by tg3_get_eeprom_hw_cfg(), and finally the
 * hardcoded subsystem-ID table.  Also selects the per-function APE
 * PHY lock, detects EEE capability, and — when no firmware owns the
 * PHY — resets it and kicks off autonegotiation.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Each PCI function arbitrates PHY access with APE
		 * firmware through its own lock.
		 */
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two PHYSID halves into the driver's
		 * internal TG3_PHY_ID format.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* EEE-capable copper ASICs (excluding early 5717/57765 steppings):
	 * default EEE on with the 2047us link-idle LPI timer.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice; the first read is discarded —
		 * presumably to clear the latched link-down status so
		 * the second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the current advertisement
		 * doesn't already match what we want.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* NOTE(review): the DSP init is run twice on success;
		 * this duplication looks intentional (5401 quirk) —
		 * confirm before "simplifying" it away.
		 */
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15601
/* Parse the device's PCI VPD (Vital Product Data) to fill in
 * tp->board_part_number and, on boards whose VPD MFR ID is "1028"
 * (Dell), prefix tp->fw_ver with the vendor firmware string.  When no
 * VPD or no part number is found, falls back to a name derived from
 * the PCI device ID, or "none".
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	/* Dell boards (MFR ID "1028"): pull the vendor firmware
	 * version (V0 keyword) into tp->fw_ver.
	 */
	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Leave room for the NUL; " bc " marks where the
		 * bootcode version gets appended later.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Board part number (PN keyword). */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No usable VPD: synthesize a part number from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15725
15726 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15727 {
15728         u32 val;
15729
15730         if (tg3_nvram_read(tp, offset, &val) ||
15731             (val & 0xfc000000) != 0x0c000000 ||
15732             tg3_nvram_read(tp, offset + 4, &val) ||
15733             val != 0)
15734                 return 0;
15735
15736         return 1;
15737 }
15738
/* Append the bootcode firmware version from NVRAM to tp->fw_ver.
 *
 * NVRAM word 0xc holds the bootcode image offset and word 0x4 the
 * image start address.  "New" format images (first header word tagged
 * 0x0c000000 with a zero second word) carry a 16-byte ASCII version
 * string whose location comes from the third header word; older
 * images instead encode major/minor in the TG3_NVM_PTREV_BCVER word.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc: bootcode image offset; word 0x4: image start. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Same header test as tg3_fw_img_is_valid(): new-format image? */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD) version string is already there. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte ASCII version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image start address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Old format: version packed into one directory word. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15790
15791 static void tg3_read_hwsb_ver(struct tg3 *tp)
15792 {
15793         u32 val, major, minor;
15794
15795         /* Use native endian representation */
15796         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15797                 return;
15798
15799         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15800                 TG3_NVM_HWSB_CFG1_MAJSFT;
15801         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15802                 TG3_NVM_HWSB_CFG1_MINSFT;
15803
15804         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15805 }
15806
/* Append the self-boot (EEPROM) firmware version to tp->fw_ver.
 *
 * @val: magic word previously read from NVRAM offset 0.  Its format
 * and revision fields select where the era/date header word lives.
 * Unknown formats/revisions leave just the bare "sb" tag.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision stores the EDH (era/date header) word
	 * at a different offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: keep only the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity check: minor is two decimal digits, build maps to a-z. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds are encoded as a letter suffix: 1 -> 'a' ... 26 -> 'z'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15861
/* Append the management (ASF) firmware version to tp->fw_ver.
 *
 * Scans the NVRAM directory for an ASF-init entry, validates the
 * referenced image with tg3_fw_img_is_valid(), then copies up to 16
 * bytes of version text (truncated to fit TG3_VER_SIZE) preceded by
 * ", ".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Walk the NVRAM directory looking for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed image start address. */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): the ", " separator is appended without checking
	 * vlen against TG3_VER_SIZE; presumably fw_ver is short enough
	 * at every call site -- confirm.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 bytes (4 words) of version text, truncating the
	 * final word if the buffer would overflow.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
15913
15914 static void tg3_probe_ncsi(struct tg3 *tp)
15915 {
15916         u32 apedata;
15917
15918         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15919         if (apedata != APE_SEG_SIG_MAGIC)
15920                 return;
15921
15922         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15923         if (!(apedata & APE_FW_STATUS_READY))
15924                 return;
15925
15926         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15927                 tg3_flag_set(tp, APE_HAS_NCSI);
15928 }
15929
15930 static void tg3_read_dash_ver(struct tg3 *tp)
15931 {
15932         int vlen;
15933         u32 apedata;
15934         char *fwtype;
15935
15936         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15937
15938         if (tg3_flag(tp, APE_HAS_NCSI))
15939                 fwtype = "NCSI";
15940         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15941                 fwtype = "SMASH";
15942         else
15943                 fwtype = "DASH";
15944
15945         vlen = strlen(tp->fw_ver);
15946
15947         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15948                  fwtype,
15949                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15950                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15951                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15952                  (apedata & APE_FW_VERSION_BLDMSK));
15953 }
15954
15955 static void tg3_read_otp_ver(struct tg3 *tp)
15956 {
15957         u32 val, val2;
15958
15959         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15960                 return;
15961
15962         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15963             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15964             TG3_OTP_MAGIC0_VALID(val)) {
15965                 u64 val64 = (u64) val << 32 | val2;
15966                 u32 ver = 0;
15967                 int i, vlen;
15968
15969                 for (i = 0; i < 7; i++) {
15970                         if ((val64 & 0xff) == 0)
15971                                 break;
15972                         ver = val64 & 0xff;
15973                         val64 >>= 8;
15974                 }
15975                 vlen = strlen(tp->fw_ver);
15976                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15977         }
15978 }
15979
15980 static void tg3_read_fw_ver(struct tg3 *tp)
15981 {
15982         u32 val;
15983         bool vpd_vers = false;
15984
15985         if (tp->fw_ver[0] != 0)
15986                 vpd_vers = true;
15987
15988         if (tg3_flag(tp, NO_NVRAM)) {
15989                 strcat(tp->fw_ver, "sb");
15990                 tg3_read_otp_ver(tp);
15991                 return;
15992         }
15993
15994         if (tg3_nvram_read(tp, 0, &val))
15995                 return;
15996
15997         if (val == TG3_EEPROM_MAGIC)
15998                 tg3_read_bc_ver(tp);
15999         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16000                 tg3_read_sb_ver(tp, val);
16001         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16002                 tg3_read_hwsb_ver(tp);
16003
16004         if (tg3_flag(tp, ENABLE_ASF)) {
16005                 if (tg3_flag(tp, ENABLE_APE)) {
16006                         tg3_probe_ncsi(tp);
16007                         if (!vpd_vers)
16008                                 tg3_read_dash_ver(tp);
16009                 } else if (!vpd_vers) {
16010                         tg3_read_mgmtfw_ver(tp);
16011                 }
16012         }
16013
16014         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16015 }
16016
16017 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16018 {
16019         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16020                 return TG3_RX_RET_MAX_SIZE_5717;
16021         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16022                 return TG3_RX_RET_MAX_SIZE_5700;
16023         else
16024                 return TG3_RX_RET_MAX_SIZE_5705;
16025 }
16026
/* Host bridges known to reorder posted writes to the chip's mailbox
 * registers; when one is present the driver reads back every mailbox
 * write to force ordering (see the AMD 762 / VIA K8T800 comment where
 * this table is consulted below).
 */
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
16033
/* Find the sibling PCI function of a dual-port device (5704/5714).
 *
 * Scans all eight functions in this device's slot for a pci_dev other
 * than tp->pdev.  Returns tp->pdev itself when no distinct peer exists
 * (single-port configuration).  The returned device is NOT
 * reference-counted -- see the comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put() tolerates NULL, so no guard is needed. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
16061
/* Determine the chip revision ID and derive the coarse family flags
 * (5705_PLUS, 5750_PLUS, 5755_PLUS, 5780_CLASS, 57765_CLASS/PLUS,
 * 5717_PLUS) that the rest of the driver keys off.
 *
 * @misc_ctrl_reg: value read from TG3PCI_MISC_HOST_CTRL; its upper
 * bits normally hold the chip revision.  Devices reporting
 * ASIC_REV_USE_PROD_ID_REG store the real revision in a separate
 * product-ID config register instead.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Choose which product-ID register carries the real
		 * ASIC revision for this device family.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0 throughout the driver. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	/* 57765_PLUS is the union of 57765-class, 5717-plus, and 5762. */
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	/* Each broader class flag below subsumes the narrower ones above. */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
16149
16150 static bool tg3_10_100_only_device(struct tg3 *tp,
16151                                    const struct pci_device_id *ent)
16152 {
16153         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16154
16155         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16156              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16157             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16158                 return true;
16159
16160         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16161                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16162                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16163                                 return true;
16164                 } else {
16165                         return true;
16166                 }
16167         }
16168
16169         return false;
16170 }
16171
16172 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16173 {
16174         u32 misc_ctrl_reg;
16175         u32 pci_state_reg, grc_misc_cfg;
16176         u32 val;
16177         u16 pci_cmd;
16178         int err;
16179
16180         /* Force memory write invalidate off.  If we leave it on,
16181          * then on 5700_BX chips we have to enable a workaround.
16182          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16183          * to match the cacheline size.  The Broadcom driver have this
16184          * workaround but turns MWI off all the times so never uses
16185          * it.  This seems to suggest that the workaround is insufficient.
16186          */
16187         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16188         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16189         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16190
16191         /* Important! -- Make sure register accesses are byteswapped
16192          * correctly.  Also, for those chips that require it, make
16193          * sure that indirect register accesses are enabled before
16194          * the first operation.
16195          */
16196         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16197                               &misc_ctrl_reg);
16198         tp->misc_host_ctrl |= (misc_ctrl_reg &
16199                                MISC_HOST_CTRL_CHIPREV);
16200         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16201                                tp->misc_host_ctrl);
16202
16203         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16204
16205         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16206          * we need to disable memory and use config. cycles
16207          * only to access all registers. The 5702/03 chips
16208          * can mistakenly decode the special cycles from the
16209          * ICH chipsets as memory write cycles, causing corruption
16210          * of register and memory space. Only certain ICH bridges
16211          * will drive special cycles with non-zero data during the
16212          * address phase which can fall within the 5703's address
16213          * range. This is not an ICH bug as the PCI spec allows
16214          * non-zero address during special cycles. However, only
16215          * these ICH bridges are known to drive non-zero addresses
16216          * during special cycles.
16217          *
16218          * Since special cycles do not cross PCI bridges, we only
16219          * enable this workaround if the 5703 is on the secondary
16220          * bus of these ICH bridges.
16221          */
16222         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16223             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16224                 static struct tg3_dev_id {
16225                         u32     vendor;
16226                         u32     device;
16227                         u32     rev;
16228                 } ich_chipsets[] = {
16229                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16230                           PCI_ANY_ID },
16231                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16232                           PCI_ANY_ID },
16233                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16234                           0xa },
16235                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16236                           PCI_ANY_ID },
16237                         { },
16238                 };
16239                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16240                 struct pci_dev *bridge = NULL;
16241
16242                 while (pci_id->vendor != 0) {
16243                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16244                                                 bridge);
16245                         if (!bridge) {
16246                                 pci_id++;
16247                                 continue;
16248                         }
16249                         if (pci_id->rev != PCI_ANY_ID) {
16250                                 if (bridge->revision > pci_id->rev)
16251                                         continue;
16252                         }
16253                         if (bridge->subordinate &&
16254                             (bridge->subordinate->number ==
16255                              tp->pdev->bus->number)) {
16256                                 tg3_flag_set(tp, ICH_WORKAROUND);
16257                                 pci_dev_put(bridge);
16258                                 break;
16259                         }
16260                 }
16261         }
16262
16263         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16264                 static struct tg3_dev_id {
16265                         u32     vendor;
16266                         u32     device;
16267                 } bridge_chipsets[] = {
16268                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16269                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16270                         { },
16271                 };
16272                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16273                 struct pci_dev *bridge = NULL;
16274
16275                 while (pci_id->vendor != 0) {
16276                         bridge = pci_get_device(pci_id->vendor,
16277                                                 pci_id->device,
16278                                                 bridge);
16279                         if (!bridge) {
16280                                 pci_id++;
16281                                 continue;
16282                         }
16283                         if (bridge->subordinate &&
16284                             (bridge->subordinate->number <=
16285                              tp->pdev->bus->number) &&
16286                             (bridge->subordinate->busn_res.end >=
16287                              tp->pdev->bus->number)) {
16288                                 tg3_flag_set(tp, 5701_DMA_BUG);
16289                                 pci_dev_put(bridge);
16290                                 break;
16291                         }
16292                 }
16293         }
16294
16295         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16296          * DMA addresses > 40-bit. This bridge may have other additional
16297          * 57xx devices behind it in some 4-port NIC designs for example.
16298          * Any tg3 device found behind the bridge will also need the 40-bit
16299          * DMA workaround.
16300          */
16301         if (tg3_flag(tp, 5780_CLASS)) {
16302                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16303                 tp->msi_cap = tp->pdev->msi_cap;
16304         } else {
16305                 struct pci_dev *bridge = NULL;
16306
16307                 do {
16308                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16309                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16310                                                 bridge);
16311                         if (bridge && bridge->subordinate &&
16312                             (bridge->subordinate->number <=
16313                              tp->pdev->bus->number) &&
16314                             (bridge->subordinate->busn_res.end >=
16315                              tp->pdev->bus->number)) {
16316                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16317                                 pci_dev_put(bridge);
16318                                 break;
16319                         }
16320                 } while (bridge);
16321         }
16322
16323         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16324             tg3_asic_rev(tp) == ASIC_REV_5714)
16325                 tp->pdev_peer = tg3_find_peer(tp);
16326
16327         /* Determine TSO capabilities */
16328         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16329                 ; /* Do nothing. HW bug. */
16330         else if (tg3_flag(tp, 57765_PLUS))
16331                 tg3_flag_set(tp, HW_TSO_3);
16332         else if (tg3_flag(tp, 5755_PLUS) ||
16333                  tg3_asic_rev(tp) == ASIC_REV_5906)
16334                 tg3_flag_set(tp, HW_TSO_2);
16335         else if (tg3_flag(tp, 5750_PLUS)) {
16336                 tg3_flag_set(tp, HW_TSO_1);
16337                 tg3_flag_set(tp, TSO_BUG);
16338                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16339                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16340                         tg3_flag_clear(tp, TSO_BUG);
16341         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16342                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16343                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16344                 tg3_flag_set(tp, FW_TSO);
16345                 tg3_flag_set(tp, TSO_BUG);
16346                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16347                         tp->fw_needed = FIRMWARE_TG3TSO5;
16348                 else
16349                         tp->fw_needed = FIRMWARE_TG3TSO;
16350         }
16351
16352         /* Selectively allow TSO based on operating conditions */
16353         if (tg3_flag(tp, HW_TSO_1) ||
16354             tg3_flag(tp, HW_TSO_2) ||
16355             tg3_flag(tp, HW_TSO_3) ||
16356             tg3_flag(tp, FW_TSO)) {
16357                 /* For firmware TSO, assume ASF is disabled.
16358                  * We'll disable TSO later if we discover ASF
16359                  * is enabled in tg3_get_eeprom_hw_cfg().
16360                  */
16361                 tg3_flag_set(tp, TSO_CAPABLE);
16362         } else {
16363                 tg3_flag_clear(tp, TSO_CAPABLE);
16364                 tg3_flag_clear(tp, TSO_BUG);
16365                 tp->fw_needed = NULL;
16366         }
16367
16368         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16369                 tp->fw_needed = FIRMWARE_TG3;
16370
16371         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16372                 tp->fw_needed = FIRMWARE_TG357766;
16373
16374         tp->irq_max = 1;
16375
16376         if (tg3_flag(tp, 5750_PLUS)) {
16377                 tg3_flag_set(tp, SUPPORT_MSI);
16378                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16379                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16380                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16381                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16382                      tp->pdev_peer == tp->pdev))
16383                         tg3_flag_clear(tp, SUPPORT_MSI);
16384
16385                 if (tg3_flag(tp, 5755_PLUS) ||
16386                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16387                         tg3_flag_set(tp, 1SHOT_MSI);
16388                 }
16389
16390                 if (tg3_flag(tp, 57765_PLUS)) {
16391                         tg3_flag_set(tp, SUPPORT_MSIX);
16392                         tp->irq_max = TG3_IRQ_MAX_VECS;
16393                 }
16394         }
16395
16396         tp->txq_max = 1;
16397         tp->rxq_max = 1;
16398         if (tp->irq_max > 1) {
16399                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16400                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16401
16402                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16403                     tg3_asic_rev(tp) == ASIC_REV_5720)
16404                         tp->txq_max = tp->irq_max - 1;
16405         }
16406
16407         if (tg3_flag(tp, 5755_PLUS) ||
16408             tg3_asic_rev(tp) == ASIC_REV_5906)
16409                 tg3_flag_set(tp, SHORT_DMA_BUG);
16410
16411         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16412                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16413
16414         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16415             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16416             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16417             tg3_asic_rev(tp) == ASIC_REV_5762)
16418                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16419
16420         if (tg3_flag(tp, 57765_PLUS) &&
16421             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16422                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16423
16424         if (!tg3_flag(tp, 5705_PLUS) ||
16425             tg3_flag(tp, 5780_CLASS) ||
16426             tg3_flag(tp, USE_JUMBO_BDFLAG))
16427                 tg3_flag_set(tp, JUMBO_CAPABLE);
16428
16429         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16430                               &pci_state_reg);
16431
16432         if (pci_is_pcie(tp->pdev)) {
16433                 u16 lnkctl;
16434
16435                 tg3_flag_set(tp, PCI_EXPRESS);
16436
16437                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16438                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16439                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16440                                 tg3_flag_clear(tp, HW_TSO_2);
16441                                 tg3_flag_clear(tp, TSO_CAPABLE);
16442                         }
16443                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16444                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16445                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16446                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16447                                 tg3_flag_set(tp, CLKREQ_BUG);
16448                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16449                         tg3_flag_set(tp, L1PLLPD_EN);
16450                 }
16451         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16452                 /* BCM5785 devices are effectively PCIe devices, and should
16453                  * follow PCIe codepaths, but do not have a PCIe capabilities
16454                  * section.
16455                  */
16456                 tg3_flag_set(tp, PCI_EXPRESS);
16457         } else if (!tg3_flag(tp, 5705_PLUS) ||
16458                    tg3_flag(tp, 5780_CLASS)) {
16459                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16460                 if (!tp->pcix_cap) {
16461                         dev_err(&tp->pdev->dev,
16462                                 "Cannot find PCI-X capability, aborting\n");
16463                         return -EIO;
16464                 }
16465
16466                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16467                         tg3_flag_set(tp, PCIX_MODE);
16468         }
16469
16470         /* If we have an AMD 762 or VIA K8T800 chipset, write
16471          * reordering to the mailbox registers done by the host
16472          * controller can cause major troubles.  We read back from
16473          * every mailbox register write to force the writes to be
16474          * posted to the chip in order.
16475          */
16476         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16477             !tg3_flag(tp, PCI_EXPRESS))
16478                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16479
16480         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16481                              &tp->pci_cacheline_sz);
16482         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16483                              &tp->pci_lat_timer);
16484         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16485             tp->pci_lat_timer < 64) {
16486                 tp->pci_lat_timer = 64;
16487                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16488                                       tp->pci_lat_timer);
16489         }
16490
16491         /* Important! -- It is critical that the PCI-X hw workaround
16492          * situation is decided before the first MMIO register access.
16493          */
16494         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16495                 /* 5700 BX chips need to have their TX producer index
16496                  * mailboxes written twice to workaround a bug.
16497                  */
16498                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16499
16500                 /* If we are in PCI-X mode, enable register write workaround.
16501                  *
16502                  * The workaround is to use indirect register accesses
16503                  * for all chip writes not to mailbox registers.
16504                  */
16505                 if (tg3_flag(tp, PCIX_MODE)) {
16506                         u32 pm_reg;
16507
16508                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16509
16510                         /* The chip can have it's power management PCI config
16511                          * space registers clobbered due to this bug.
16512                          * So explicitly force the chip into D0 here.
16513                          */
16514                         pci_read_config_dword(tp->pdev,
16515                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16516                                               &pm_reg);
16517                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16518                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16519                         pci_write_config_dword(tp->pdev,
16520                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16521                                                pm_reg);
16522
16523                         /* Also, force SERR#/PERR# in PCI command. */
16524                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16525                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16526                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16527                 }
16528         }
16529
16530         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16531                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16532         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16533                 tg3_flag_set(tp, PCI_32BIT);
16534
16535         /* Chip-specific fixup from Broadcom driver */
16536         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16537             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16538                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16539                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16540         }
16541
16542         /* Default fast path register access methods */
16543         tp->read32 = tg3_read32;
16544         tp->write32 = tg3_write32;
16545         tp->read32_mbox = tg3_read32;
16546         tp->write32_mbox = tg3_write32;
16547         tp->write32_tx_mbox = tg3_write32;
16548         tp->write32_rx_mbox = tg3_write32;
16549
16550         /* Various workaround register access methods */
16551         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16552                 tp->write32 = tg3_write_indirect_reg32;
16553         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16554                  (tg3_flag(tp, PCI_EXPRESS) &&
16555                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16556                 /*
16557                  * Back to back register writes can cause problems on these
16558                  * chips, the workaround is to read back all reg writes
16559                  * except those to mailbox regs.
16560                  *
16561                  * See tg3_write_indirect_reg32().
16562                  */
16563                 tp->write32 = tg3_write_flush_reg32;
16564         }
16565
16566         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16567                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16568                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16569                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16570         }
16571
16572         if (tg3_flag(tp, ICH_WORKAROUND)) {
16573                 tp->read32 = tg3_read_indirect_reg32;
16574                 tp->write32 = tg3_write_indirect_reg32;
16575                 tp->read32_mbox = tg3_read_indirect_mbox;
16576                 tp->write32_mbox = tg3_write_indirect_mbox;
16577                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16578                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16579
16580                 iounmap(tp->regs);
16581                 tp->regs = NULL;
16582
16583                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16584                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16585                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16586         }
16587         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16588                 tp->read32_mbox = tg3_read32_mbox_5906;
16589                 tp->write32_mbox = tg3_write32_mbox_5906;
16590                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16591                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16592         }
16593
16594         if (tp->write32 == tg3_write_indirect_reg32 ||
16595             (tg3_flag(tp, PCIX_MODE) &&
16596              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16597               tg3_asic_rev(tp) == ASIC_REV_5701)))
16598                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16599
16600         /* The memory arbiter has to be enabled in order for SRAM accesses
16601          * to succeed.  Normally on powerup the tg3 chip firmware will make
16602          * sure it is enabled, but other entities such as system netboot
16603          * code might disable it.
16604          */
16605         val = tr32(MEMARB_MODE);
16606         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16607
16608         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16609         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16610             tg3_flag(tp, 5780_CLASS)) {
16611                 if (tg3_flag(tp, PCIX_MODE)) {
16612                         pci_read_config_dword(tp->pdev,
16613                                               tp->pcix_cap + PCI_X_STATUS,
16614                                               &val);
16615                         tp->pci_fn = val & 0x7;
16616                 }
16617         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16618                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16619                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16620                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16621                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16622                         val = tr32(TG3_CPMU_STATUS);
16623
16624                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16625                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16626                 else
16627                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16628                                      TG3_CPMU_STATUS_FSHFT_5719;
16629         }
16630
16631         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16632                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16633                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16634         }
16635
16636         /* Get eeprom hw config before calling tg3_set_power_state().
16637          * In particular, the TG3_FLAG_IS_NIC flag must be
16638          * determined before calling tg3_set_power_state() so that
16639          * we know whether or not to switch out of Vaux power.
16640          * When the flag is set, it means that GPIO1 is used for eeprom
16641          * write protect and also implies that it is a LOM where GPIOs
16642          * are not used to switch power.
16643          */
16644         tg3_get_eeprom_hw_cfg(tp);
16645
16646         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16647                 tg3_flag_clear(tp, TSO_CAPABLE);
16648                 tg3_flag_clear(tp, TSO_BUG);
16649                 tp->fw_needed = NULL;
16650         }
16651
16652         if (tg3_flag(tp, ENABLE_APE)) {
16653                 /* Allow reads and writes to the
16654                  * APE register and memory space.
16655                  */
16656                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16657                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16658                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16659                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16660                                        pci_state_reg);
16661
16662                 tg3_ape_lock_init(tp);
16663         }
16664
16665         /* Set up tp->grc_local_ctrl before calling
16666          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16667          * will bring 5700's external PHY out of reset.
16668          * It is also used as eeprom write protect on LOMs.
16669          */
16670         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16671         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672             tg3_flag(tp, EEPROM_WRITE_PROT))
16673                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16674                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16675         /* Unused GPIO3 must be driven as output on 5752 because there
16676          * are no pull-up resistors on unused GPIO pins.
16677          */
16678         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16679                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16680
16681         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16682             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16683             tg3_flag(tp, 57765_CLASS))
16684                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16685
16686         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16687             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16688                 /* Turn off the debug UART. */
16689                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16690                 if (tg3_flag(tp, IS_NIC))
16691                         /* Keep VMain power. */
16692                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16693                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16694         }
16695
16696         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16697                 tp->grc_local_ctrl |=
16698                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16699
16700         /* Switch out of Vaux if it is a NIC */
16701         tg3_pwrsrc_switch_to_vmain(tp);
16702
16703         /* Derive initial jumbo mode from MTU assigned in
16704          * ether_setup() via the alloc_etherdev() call
16705          */
16706         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16707                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16708
16709         /* Determine WakeOnLan speed to use. */
16710         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16711             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16712             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16713             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16714                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16715         } else {
16716                 tg3_flag_set(tp, WOL_SPEED_100MB);
16717         }
16718
16719         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16720                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16721
16722         /* A few boards don't want Ethernet@WireSpeed phy feature */
16723         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16724             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16725              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16726              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16727             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16728             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16729                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16730
16731         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16732             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16733                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16734         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16735                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16736
16737         if (tg3_flag(tp, 5705_PLUS) &&
16738             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16739             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16740             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16741             !tg3_flag(tp, 57765_PLUS)) {
16742                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16743                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16744                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16745                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16746                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16747                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16748                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16749                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16750                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16751                 } else
16752                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16753         }
16754
16755         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16756             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16757                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16758                 if (tp->phy_otp == 0)
16759                         tp->phy_otp = TG3_OTP_DEFAULT;
16760         }
16761
16762         if (tg3_flag(tp, CPMU_PRESENT))
16763                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16764         else
16765                 tp->mi_mode = MAC_MI_MODE_BASE;
16766
16767         tp->coalesce_mode = 0;
16768         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16769             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16770                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16771
16772         /* Set these bits to enable statistics workaround. */
16773         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16774             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16775             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16776             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16777                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16778                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16779         }
16780
16781         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16782             tg3_asic_rev(tp) == ASIC_REV_57780)
16783                 tg3_flag_set(tp, USE_PHYLIB);
16784
16785         err = tg3_mdio_init(tp);
16786         if (err)
16787                 return err;
16788
16789         /* Initialize data/descriptor byte/word swapping. */
16790         val = tr32(GRC_MODE);
16791         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16792             tg3_asic_rev(tp) == ASIC_REV_5762)
16793                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16794                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16795                         GRC_MODE_B2HRX_ENABLE |
16796                         GRC_MODE_HTX2B_ENABLE |
16797                         GRC_MODE_HOST_STACKUP);
16798         else
16799                 val &= GRC_MODE_HOST_STACKUP;
16800
16801         tw32(GRC_MODE, val | tp->grc_mode);
16802
16803         tg3_switch_clocks(tp);
16804
16805         /* Clear this out for sanity. */
16806         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16807
16808         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16809         tw32(TG3PCI_REG_BASE_ADDR, 0);
16810
16811         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16812                               &pci_state_reg);
16813         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16814             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16815                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16816                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16817                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16818                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16819                         void __iomem *sram_base;
16820
16821                         /* Write some dummy words into the SRAM status block
16822                          * area, see if it reads back correctly.  If the return
16823                          * value is bad, force enable the PCIX workaround.
16824                          */
16825                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16826
16827                         writel(0x00000000, sram_base);
16828                         writel(0x00000000, sram_base + 4);
16829                         writel(0xffffffff, sram_base + 4);
16830                         if (readl(sram_base) != 0x00000000)
16831                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16832                 }
16833         }
16834
16835         udelay(50);
16836         tg3_nvram_init(tp);
16837
16838         /* If the device has an NVRAM, no need to load patch firmware */
16839         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16840             !tg3_flag(tp, NO_NVRAM))
16841                 tp->fw_needed = NULL;
16842
16843         grc_misc_cfg = tr32(GRC_MISC_CFG);
16844         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16845
16846         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16847             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16848              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16849                 tg3_flag_set(tp, IS_5788);
16850
16851         if (!tg3_flag(tp, IS_5788) &&
16852             tg3_asic_rev(tp) != ASIC_REV_5700)
16853                 tg3_flag_set(tp, TAGGED_STATUS);
16854         if (tg3_flag(tp, TAGGED_STATUS)) {
16855                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16856                                       HOSTCC_MODE_CLRTICK_TXBD);
16857
16858                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16859                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16860                                        tp->misc_host_ctrl);
16861         }
16862
16863         /* Preserve the APE MAC_MODE bits */
16864         if (tg3_flag(tp, ENABLE_APE))
16865                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16866         else
16867                 tp->mac_mode = 0;
16868
16869         if (tg3_10_100_only_device(tp, ent))
16870                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16871
16872         err = tg3_phy_probe(tp);
16873         if (err) {
16874                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16875                 /* ... but do not return immediately ... */
16876                 tg3_mdio_fini(tp);
16877         }
16878
16879         tg3_read_vpd(tp);
16880         tg3_read_fw_ver(tp);
16881
16882         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16883                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16884         } else {
16885                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16886                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16887                 else
16888                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16889         }
16890
16891         /* 5700 {AX,BX} chips have a broken status block link
16892          * change bit implementation, so we must use the
16893          * status register in those cases.
16894          */
16895         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16896                 tg3_flag_set(tp, USE_LINKCHG_REG);
16897         else
16898                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16899
16900         /* The led_ctrl is set during tg3_phy_probe, here we might
16901          * have to force the link status polling mechanism based
16902          * upon subsystem IDs.
16903          */
16904         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16905             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16906             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16907                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16908                 tg3_flag_set(tp, USE_LINKCHG_REG);
16909         }
16910
16911         /* For all SERDES we poll the MAC status register. */
16912         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16913                 tg3_flag_set(tp, POLL_SERDES);
16914         else
16915                 tg3_flag_clear(tp, POLL_SERDES);
16916
16917         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16918                 tg3_flag_set(tp, POLL_CPMU_LINK);
16919
16920         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16921         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16922         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16923             tg3_flag(tp, PCIX_MODE)) {
16924                 tp->rx_offset = NET_SKB_PAD;
16925 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16926                 tp->rx_copy_thresh = ~(u16)0;
16927 #endif
16928         }
16929
16930         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16931         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16932         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16933
16934         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16935
16936         /* Increment the rx prod index on the rx std ring by at most
16937          * 8 for these chips to workaround hw errata.
16938          */
16939         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16940             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16941             tg3_asic_rev(tp) == ASIC_REV_5755)
16942                 tp->rx_std_max_post = 8;
16943
16944         if (tg3_flag(tp, ASPM_WORKAROUND))
16945                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16946                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16947
16948         return err;
16949 }
16950
#ifdef CONFIG_SPARC
/* Fetch the MAC address from the OpenFirmware device-tree node of the
 * PCI device ("local-mac-address" property).  Returns 0 on success,
 * -ENODEV when the property is absent or not ETH_ALEN bytes long.
 */
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct device_node *dp = pci_device_to_OF_node(tp->pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (!addr || len != ETH_ALEN)
		return -ENODEV;

	memcpy(tp->dev->dev_addr, addr, ETH_ALEN);
	return 0;
}

/* Last-resort MAC address on SPARC: copy the system-wide address out of
 * the machine's IDPROM.  Always succeeds.
 */
static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	memcpy(tp->dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
16976
/* Determine the device's permanent MAC address and store it in
 * tp->dev->dev_addr.  Sources are tried in priority order:
 *   1. OpenFirmware property (SPARC only)
 *   2. SSB core registers (for SSB-attached GigE cores)
 *   3. NIC SRAM MAC-address mailbox (written by bootcode)
 *   4. NVRAM at a chip/function dependent offset
 *   5. The live MAC_ADDR_0_{HIGH,LOW} chip registers
 * Returns 0 on success, -EINVAL when no source yields a valid address.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
		/* On failure, fall through to the normal probe path. */
	}

	/* Select the NVRAM offset of the MAC address for this chip and
	 * PCI function (dual-MAC parts store the second MAC at 0xcc).
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be taken, reset the NVRAM
		 * state machine; otherwise just drop the lock again.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" in the top half-word — the signature the
	 * bootcode places when it has filled in the mailbox.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds the first 2 bytes in its low half
			 * (big-endian), lo holds the remaining 4.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
17058
17059 #define BOUNDARY_SINGLE_CACHELINE       1
17060 #define BOUNDARY_MULTI_CACHELINE        2
17061
17062 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17063 {
17064         int cacheline_size;
17065         u8 byte;
17066         int goal;
17067
17068         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17069         if (byte == 0)
17070                 cacheline_size = 1024;
17071         else
17072                 cacheline_size = (int) byte * 4;
17073
17074         /* On 5703 and later chips, the boundary bits have no
17075          * effect.
17076          */
17077         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17078             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17079             !tg3_flag(tp, PCI_EXPRESS))
17080                 goto out;
17081
17082 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17083         goal = BOUNDARY_MULTI_CACHELINE;
17084 #else
17085 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17086         goal = BOUNDARY_SINGLE_CACHELINE;
17087 #else
17088         goal = 0;
17089 #endif
17090 #endif
17091
17092         if (tg3_flag(tp, 57765_PLUS)) {
17093                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17094                 goto out;
17095         }
17096
17097         if (!goal)
17098                 goto out;
17099
17100         /* PCI controllers on most RISC systems tend to disconnect
17101          * when a device tries to burst across a cache-line boundary.
17102          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17103          *
17104          * Unfortunately, for PCI-E there are only limited
17105          * write-side controls for this, and thus for reads
17106          * we will still get the disconnects.  We'll also waste
17107          * these PCI cycles for both read and write for chips
17108          * other than 5700 and 5701 which do not implement the
17109          * boundary bits.
17110          */
17111         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17112                 switch (cacheline_size) {
17113                 case 16:
17114                 case 32:
17115                 case 64:
17116                 case 128:
17117                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17118                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17119                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17120                         } else {
17121                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17122                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17123                         }
17124                         break;
17125
17126                 case 256:
17127                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17128                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17129                         break;
17130
17131                 default:
17132                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17133                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17134                         break;
17135                 }
17136         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17137                 switch (cacheline_size) {
17138                 case 16:
17139                 case 32:
17140                 case 64:
17141                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17142                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17143                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17144                                 break;
17145                         }
17146                         /* fallthrough */
17147                 case 128:
17148                 default:
17149                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17150                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17151                         break;
17152                 }
17153         } else {
17154                 switch (cacheline_size) {
17155                 case 16:
17156                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17157                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17158                                         DMA_RWCTRL_WRITE_BNDRY_16);
17159                                 break;
17160                         }
17161                         /* fallthrough */
17162                 case 32:
17163                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17165                                         DMA_RWCTRL_WRITE_BNDRY_32);
17166                                 break;
17167                         }
17168                         /* fallthrough */
17169                 case 64:
17170                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17172                                         DMA_RWCTRL_WRITE_BNDRY_64);
17173                                 break;
17174                         }
17175                         /* fallthrough */
17176                 case 128:
17177                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17179                                         DMA_RWCTRL_WRITE_BNDRY_128);
17180                                 break;
17181                         }
17182                         /* fallthrough */
17183                 case 256:
17184                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17185                                 DMA_RWCTRL_WRITE_BNDRY_256);
17186                         break;
17187                 case 512:
17188                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17189                                 DMA_RWCTRL_WRITE_BNDRY_512);
17190                         break;
17191                 case 1024:
17192                 default:
17193                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17194                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17195                         break;
17196                 }
17197         }
17198
17199 out:
17200         return val;
17201 }
17202
/* Drive one transfer through the chip's internal DMA engine to check
 * that DMA between host memory and the NIC works.
 *
 * @tp:        device private data
 * @buf:       kernel virtual address of the test buffer (not dereferenced
 *             here; the engine uses @buf_dma directly)
 * @buf_dma:   bus/DMA address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: true  = read DMA (host memory -> chip),
 *             false = write DMA (chip -> host memory)
 *
 * Returns 0 if the DMA completion shows up within the 40 * 100us poll
 * window, -ENODEV otherwise.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA engine status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor pointing at the host buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO for the descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17283
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * the loopback DMA test in tg3_test_dma() passes; their presence forces
 * the 16-byte write-boundary workaround.
 */
static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
17290
/* Establish the final TG3PCI_DMA_RW_CTRL value for this board.
 *
 * The watermark/boundary bits are first derived from the bus type and
 * chip revision.  Then, on 5700/5701 only, a host-memory loopback DMA
 * test is run with the maximum write burst size to expose the write DMA
 * bug on those chips; if corruption is seen (or a known-buggy host
 * bridge is present) the write boundary is clamped to 16 bytes.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if the DMA engine fails outright.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write/read command codes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-type specific watermark setup. */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* 5703/5704: clear the low nibble (those bits have a different
	 * meaning on these chips, see DMA_RWCTRL_ASSERT_ALL_BE below).
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);


	/* Only 5700/5701 need the loopback DMA test below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with the 16-byte write
			 * boundary workaround before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
17464
17465 static void tg3_init_bufmgr_config(struct tg3 *tp)
17466 {
17467         if (tg3_flag(tp, 57765_PLUS)) {
17468                 tp->bufmgr_config.mbuf_read_dma_low_water =
17469                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17470                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17471                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17472                 tp->bufmgr_config.mbuf_high_water =
17473                         DEFAULT_MB_HIGH_WATER_57765;
17474
17475                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17476                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17477                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17478                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17479                 tp->bufmgr_config.mbuf_high_water_jumbo =
17480                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17481         } else if (tg3_flag(tp, 5705_PLUS)) {
17482                 tp->bufmgr_config.mbuf_read_dma_low_water =
17483                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17484                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17485                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17486                 tp->bufmgr_config.mbuf_high_water =
17487                         DEFAULT_MB_HIGH_WATER_5705;
17488                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17489                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17490                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17491                         tp->bufmgr_config.mbuf_high_water =
17492                                 DEFAULT_MB_HIGH_WATER_5906;
17493                 }
17494
17495                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17496                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17497                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17498                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17499                 tp->bufmgr_config.mbuf_high_water_jumbo =
17500                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17501         } else {
17502                 tp->bufmgr_config.mbuf_read_dma_low_water =
17503                         DEFAULT_MB_RDMA_LOW_WATER;
17504                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17505                         DEFAULT_MB_MACRX_LOW_WATER;
17506                 tp->bufmgr_config.mbuf_high_water =
17507                         DEFAULT_MB_HIGH_WATER;
17508
17509                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17510                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17511                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17512                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17513                 tp->bufmgr_config.mbuf_high_water_jumbo =
17514                         DEFAULT_MB_HIGH_WATER_JUMBO;
17515         }
17516
17517         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17518         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17519 }
17520
17521 static char *tg3_phy_string(struct tg3 *tp)
17522 {
17523         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17524         case TG3_PHY_ID_BCM5400:        return "5400";
17525         case TG3_PHY_ID_BCM5401:        return "5401";
17526         case TG3_PHY_ID_BCM5411:        return "5411";
17527         case TG3_PHY_ID_BCM5701:        return "5701";
17528         case TG3_PHY_ID_BCM5703:        return "5703";
17529         case TG3_PHY_ID_BCM5704:        return "5704";
17530         case TG3_PHY_ID_BCM5705:        return "5705";
17531         case TG3_PHY_ID_BCM5750:        return "5750";
17532         case TG3_PHY_ID_BCM5752:        return "5752";
17533         case TG3_PHY_ID_BCM5714:        return "5714";
17534         case TG3_PHY_ID_BCM5780:        return "5780";
17535         case TG3_PHY_ID_BCM5755:        return "5755";
17536         case TG3_PHY_ID_BCM5787:        return "5787";
17537         case TG3_PHY_ID_BCM5784:        return "5784";
17538         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17539         case TG3_PHY_ID_BCM5906:        return "5906";
17540         case TG3_PHY_ID_BCM5761:        return "5761";
17541         case TG3_PHY_ID_BCM5718C:       return "5718C";
17542         case TG3_PHY_ID_BCM5718S:       return "5718S";
17543         case TG3_PHY_ID_BCM57765:       return "57765";
17544         case TG3_PHY_ID_BCM5719C:       return "5719C";
17545         case TG3_PHY_ID_BCM5720C:       return "5720C";
17546         case TG3_PHY_ID_BCM5762:        return "5762C";
17547         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17548         case 0:                 return "serdes";
17549         default:                return "unknown";
17550         }
17551 }
17552
/* Format a human-readable description of the bus the chip sits on
 * (e.g. "PCI Express:64-bit" or "PCIX:133MHz:64-bit") into @str.
 *
 * NOTE(review): @str is written with unbounded strcpy/strcat; callers
 * must supply a buffer large enough for the longest combination
 * (tg3_init_one declares char str[40] — confirm any new caller
 * provides at least that much).
 *
 * Returns @str so the call can be embedded in a printk argument list.
 */
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		/* Low 5 bits of CLOCK_CTRL encode the PCI-X bus speed. */
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		/* GRC_MISC_CFG is only read when clock_ctrl != 7 (the ||
		 * short-circuits), avoiding a needless register access.
		 */
		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
		/* Other encodings leave the speed out of the string. */
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	/* Append the probed bus width. */
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
17588
17589 static void tg3_init_coal(struct tg3 *tp)
17590 {
17591         struct ethtool_coalesce *ec = &tp->coal;
17592
17593         memset(ec, 0, sizeof(*ec));
17594         ec->cmd = ETHTOOL_GCOALESCE;
17595         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17596         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17597         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17598         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17599         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17600         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17601         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17602         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17603         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17604
17605         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17606                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17607                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17608                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17609                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17610                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17611         }
17612
17613         if (tg3_flag(tp, 5705_PLUS)) {
17614                 ec->rx_coalesce_usecs_irq = 0;
17615                 ec->tx_coalesce_usecs_irq = 0;
17616                 ec->stats_block_coalesce_usecs = 0;
17617         }
17618 }
17619
17620 static int tg3_init_one(struct pci_dev *pdev,
17621                                   const struct pci_device_id *ent)
17622 {
17623         struct net_device *dev;
17624         struct tg3 *tp;
17625         int i, err;
17626         u32 sndmbx, rcvmbx, intmbx;
17627         char str[40];
17628         u64 dma_mask, persist_dma_mask;
17629         netdev_features_t features = 0;
17630
17631         printk_once(KERN_INFO "%s\n", version);
17632
17633         err = pci_enable_device(pdev);
17634         if (err) {
17635                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17636                 return err;
17637         }
17638
17639         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17640         if (err) {
17641                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17642                 goto err_out_disable_pdev;
17643         }
17644
17645         pci_set_master(pdev);
17646
17647         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17648         if (!dev) {
17649                 err = -ENOMEM;
17650                 goto err_out_free_res;
17651         }
17652
17653         SET_NETDEV_DEV(dev, &pdev->dev);
17654
17655         tp = netdev_priv(dev);
17656         tp->pdev = pdev;
17657         tp->dev = dev;
17658         tp->rx_mode = TG3_DEF_RX_MODE;
17659         tp->tx_mode = TG3_DEF_TX_MODE;
17660         tp->irq_sync = 1;
17661         tp->pcierr_recovery = false;
17662
17663         if (tg3_debug > 0)
17664                 tp->msg_enable = tg3_debug;
17665         else
17666                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17667
17668         if (pdev_is_ssb_gige_core(pdev)) {
17669                 tg3_flag_set(tp, IS_SSB_CORE);
17670                 if (ssb_gige_must_flush_posted_writes(pdev))
17671                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17672                 if (ssb_gige_one_dma_at_once(pdev))
17673                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17674                 if (ssb_gige_have_roboswitch(pdev)) {
17675                         tg3_flag_set(tp, USE_PHYLIB);
17676                         tg3_flag_set(tp, ROBOSWITCH);
17677                 }
17678                 if (ssb_gige_is_rgmii(pdev))
17679                         tg3_flag_set(tp, RGMII_MODE);
17680         }
17681
17682         /* The word/byte swap controls here control register access byte
17683          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17684          * setting below.
17685          */
17686         tp->misc_host_ctrl =
17687                 MISC_HOST_CTRL_MASK_PCI_INT |
17688                 MISC_HOST_CTRL_WORD_SWAP |
17689                 MISC_HOST_CTRL_INDIR_ACCESS |
17690                 MISC_HOST_CTRL_PCISTATE_RW;
17691
17692         /* The NONFRM (non-frame) byte/word swap controls take effect
17693          * on descriptor entries, anything which isn't packet data.
17694          *
17695          * The StrongARM chips on the board (one for tx, one for rx)
17696          * are running in big-endian mode.
17697          */
17698         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17699                         GRC_MODE_WSWAP_NONFRM_DATA);
17700 #ifdef __BIG_ENDIAN
17701         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17702 #endif
17703         spin_lock_init(&tp->lock);
17704         spin_lock_init(&tp->indirect_lock);
17705         INIT_WORK(&tp->reset_task, tg3_reset_task);
17706
17707         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17708         if (!tp->regs) {
17709                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17710                 err = -ENOMEM;
17711                 goto err_out_free_dev;
17712         }
17713
17714         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17715             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17716             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17717             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17719             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17720             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17721             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17722             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17723             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17724             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17725             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17726             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17727             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17728             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17729                 tg3_flag_set(tp, ENABLE_APE);
17730                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17731                 if (!tp->aperegs) {
17732                         dev_err(&pdev->dev,
17733                                 "Cannot map APE registers, aborting\n");
17734                         err = -ENOMEM;
17735                         goto err_out_iounmap;
17736                 }
17737         }
17738
17739         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17740         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17741
17742         dev->ethtool_ops = &tg3_ethtool_ops;
17743         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17744         dev->netdev_ops = &tg3_netdev_ops;
17745         dev->irq = pdev->irq;
17746
17747         err = tg3_get_invariants(tp, ent);
17748         if (err) {
17749                 dev_err(&pdev->dev,
17750                         "Problem fetching invariants of chip, aborting\n");
17751                 goto err_out_apeunmap;
17752         }
17753
17754         /* The EPB bridge inside 5714, 5715, and 5780 and any
17755          * device behind the EPB cannot support DMA addresses > 40-bit.
17756          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17757          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17758          * do DMA address check in tg3_start_xmit().
17759          */
17760         if (tg3_flag(tp, IS_5788))
17761                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17762         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17763                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17764 #ifdef CONFIG_HIGHMEM
17765                 dma_mask = DMA_BIT_MASK(64);
17766 #endif
17767         } else
17768                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17769
17770         /* Configure DMA attributes. */
17771         if (dma_mask > DMA_BIT_MASK(32)) {
17772                 err = pci_set_dma_mask(pdev, dma_mask);
17773                 if (!err) {
17774                         features |= NETIF_F_HIGHDMA;
17775                         err = pci_set_consistent_dma_mask(pdev,
17776                                                           persist_dma_mask);
17777                         if (err < 0) {
17778                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17779                                         "DMA for consistent allocations\n");
17780                                 goto err_out_apeunmap;
17781                         }
17782                 }
17783         }
17784         if (err || dma_mask == DMA_BIT_MASK(32)) {
17785                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17786                 if (err) {
17787                         dev_err(&pdev->dev,
17788                                 "No usable DMA configuration, aborting\n");
17789                         goto err_out_apeunmap;
17790                 }
17791         }
17792
17793         tg3_init_bufmgr_config(tp);
17794
17795         /* 5700 B0 chips do not support checksumming correctly due
17796          * to hardware bugs.
17797          */
17798         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17799                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17800
17801                 if (tg3_flag(tp, 5755_PLUS))
17802                         features |= NETIF_F_IPV6_CSUM;
17803         }
17804
17805         /* TSO is on by default on chips that support hardware TSO.
17806          * Firmware TSO on older chips gives lower performance, so it
17807          * is off by default, but can be enabled using ethtool.
17808          */
17809         if ((tg3_flag(tp, HW_TSO_1) ||
17810              tg3_flag(tp, HW_TSO_2) ||
17811              tg3_flag(tp, HW_TSO_3)) &&
17812             (features & NETIF_F_IP_CSUM))
17813                 features |= NETIF_F_TSO;
17814         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17815                 if (features & NETIF_F_IPV6_CSUM)
17816                         features |= NETIF_F_TSO6;
17817                 if (tg3_flag(tp, HW_TSO_3) ||
17818                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17819                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17820                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17821                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17822                     tg3_asic_rev(tp) == ASIC_REV_57780)
17823                         features |= NETIF_F_TSO_ECN;
17824         }
17825
17826         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17827                          NETIF_F_HW_VLAN_CTAG_RX;
17828         dev->vlan_features |= features;
17829
17830         /*
17831          * Add loopback capability only for a subset of devices that support
17832          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17833          * loopback for the remaining devices.
17834          */
17835         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17836             !tg3_flag(tp, CPMU_PRESENT))
17837                 /* Add the loopback capability */
17838                 features |= NETIF_F_LOOPBACK;
17839
17840         dev->hw_features |= features;
17841         dev->priv_flags |= IFF_UNICAST_FLT;
17842
17843         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17844         dev->min_mtu = TG3_MIN_MTU;
17845         dev->max_mtu = TG3_MAX_MTU(tp);
17846
17847         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17848             !tg3_flag(tp, TSO_CAPABLE) &&
17849             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17850                 tg3_flag_set(tp, MAX_RXPEND_64);
17851                 tp->rx_pending = 63;
17852         }
17853
17854         err = tg3_get_device_address(tp);
17855         if (err) {
17856                 dev_err(&pdev->dev,
17857                         "Could not obtain valid ethernet address, aborting\n");
17858                 goto err_out_apeunmap;
17859         }
17860
17861         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17862         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17863         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17864         for (i = 0; i < tp->irq_max; i++) {
17865                 struct tg3_napi *tnapi = &tp->napi[i];
17866
17867                 tnapi->tp = tp;
17868                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17869
17870                 tnapi->int_mbox = intmbx;
17871                 if (i <= 4)
17872                         intmbx += 0x8;
17873                 else
17874                         intmbx += 0x4;
17875
17876                 tnapi->consmbox = rcvmbx;
17877                 tnapi->prodmbox = sndmbx;
17878
17879                 if (i)
17880                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17881                 else
17882                         tnapi->coal_now = HOSTCC_MODE_NOW;
17883
17884                 if (!tg3_flag(tp, SUPPORT_MSIX))
17885                         break;
17886
17887                 /*
17888                  * If we support MSIX, we'll be using RSS.  If we're using
17889                  * RSS, the first vector only handles link interrupts and the
17890                  * remaining vectors handle rx and tx interrupts.  Reuse the
17891                  * mailbox values for the next iteration.  The values we setup
17892                  * above are still useful for the single vectored mode.
17893                  */
17894                 if (!i)
17895                         continue;
17896
17897                 rcvmbx += 0x8;
17898
17899                 if (sndmbx & 0x4)
17900                         sndmbx -= 0x4;
17901                 else
17902                         sndmbx += 0xc;
17903         }
17904
17905         /*
17906          * Reset chip in case UNDI or EFI driver did not shutdown
17907          * DMA self test will enable WDMAC and we'll see (spurious)
17908          * pending DMA on the PCI bus at that point.
17909          */
17910         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17911             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17912                 tg3_full_lock(tp, 0);
17913                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17914                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17915                 tg3_full_unlock(tp);
17916         }
17917
17918         err = tg3_test_dma(tp);
17919         if (err) {
17920                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17921                 goto err_out_apeunmap;
17922         }
17923
17924         tg3_init_coal(tp);
17925
17926         pci_set_drvdata(pdev, dev);
17927
17928         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17929             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17930             tg3_asic_rev(tp) == ASIC_REV_5762)
17931                 tg3_flag_set(tp, PTP_CAPABLE);
17932
17933         tg3_timer_init(tp);
17934
17935         tg3_carrier_off(tp);
17936
17937         err = register_netdev(dev);
17938         if (err) {
17939                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17940                 goto err_out_apeunmap;
17941         }
17942
17943         if (tg3_flag(tp, PTP_CAPABLE)) {
17944                 tg3_ptp_init(tp);
17945                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17946                                                    &tp->pdev->dev);
17947                 if (IS_ERR(tp->ptp_clock))
17948                         tp->ptp_clock = NULL;
17949         }
17950
17951         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17952                     tp->board_part_number,
17953                     tg3_chip_rev_id(tp),
17954                     tg3_bus_string(tp, str),
17955                     dev->dev_addr);
17956
17957         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17958                 char *ethtype;
17959
17960                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17961                         ethtype = "10/100Base-TX";
17962                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17963                         ethtype = "1000Base-SX";
17964                 else
17965                         ethtype = "10/100/1000Base-T";
17966
17967                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17968                             "(WireSpeed[%d], EEE[%d])\n",
17969                             tg3_phy_string(tp), ethtype,
17970                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17971                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17972         }
17973
17974         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17975                     (dev->features & NETIF_F_RXCSUM) != 0,
17976                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17977                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17978                     tg3_flag(tp, ENABLE_ASF) != 0,
17979                     tg3_flag(tp, TSO_CAPABLE) != 0);
17980         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17981                     tp->dma_rwctrl,
17982                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17983                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17984
17985         pci_save_state(pdev);
17986
17987         return 0;
17988
17989 err_out_apeunmap:
17990         if (tp->aperegs) {
17991                 iounmap(tp->aperegs);
17992                 tp->aperegs = NULL;
17993         }
17994
17995 err_out_iounmap:
17996         if (tp->regs) {
17997                 iounmap(tp->regs);
17998                 tp->regs = NULL;
17999         }
18000
18001 err_out_free_dev:
18002         free_netdev(dev);
18003
18004 err_out_free_res:
18005         pci_release_regions(pdev);
18006
18007 err_out_disable_pdev:
18008         if (pci_is_enabled(pdev))
18009                 pci_disable_device(pdev);
18010         return err;
18011 }
18012
18013 static void tg3_remove_one(struct pci_dev *pdev)
18014 {
18015         struct net_device *dev = pci_get_drvdata(pdev);
18016
18017         if (dev) {
18018                 struct tg3 *tp = netdev_priv(dev);
18019
18020                 tg3_ptp_fini(tp);
18021
18022                 release_firmware(tp->fw);
18023
18024                 tg3_reset_task_cancel(tp);
18025
18026                 if (tg3_flag(tp, USE_PHYLIB)) {
18027                         tg3_phy_fini(tp);
18028                         tg3_mdio_fini(tp);
18029                 }
18030
18031                 unregister_netdev(dev);
18032                 if (tp->aperegs) {
18033                         iounmap(tp->aperegs);
18034                         tp->aperegs = NULL;
18035                 }
18036                 if (tp->regs) {
18037                         iounmap(tp->regs);
18038                         tp->regs = NULL;
18039                 }
18040                 free_netdev(dev);
18041                 pci_release_regions(pdev);
18042                 pci_disable_device(pdev);
18043         }
18044 }
18045
18046 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler: quiesce the interface and prepare the
 * chip for low power.  If power-down preparation fails, the hardware is
 * restarted so the device stays usable, but the original error is still
 * returned to abort the suspend.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Nothing to quiesce if the interface is down */
	if (!netif_running(dev))
		goto unlock;

	/* Stop deferred reset work, the PHY and the data path first */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* NOTE(review): the nonzero second arg to tg3_full_lock appears to
	 * request IRQ synchronization before ints are disabled — confirm
	 * against tg3_full_lock's definition.
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and drop INIT_COMPLETE so a later resume knows a
	 * full re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down preparation failed: bring the device back up
		 * so it keeps working even though the suspend is aborted.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside tg3_full_lock */
		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
18103
/* System-sleep resume handler: restore hardware state and restart the
 * interface if it was running at suspend time.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	/* Interface was down at suspend; nothing to restart */
	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	/* Notify the APE firmware that the driver is initializing again */
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* NOTE(review): the second arg presumably selects a PHY reset —
	 * skipped when the link was deliberately kept up across power-down.
	 */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside tg3_full_lock */
	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
18142 #endif /* CONFIG_PM_SLEEP */
18143
/* System-sleep power-management operations (suspend/resume only) */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18145
/* Shutdown/reboot hook: quiesce the interface, power the chip down and
 * disable the PCI device so it cannot raise bus activity afterwards.
 */
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);

	/* Make sure no deferred reset work is still pending */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(netdev);

	if (netif_running(netdev))
		dev_close(netdev);

	tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}
18166
18167 /**
18168  * tg3_io_error_detected - called when PCI error is detected
18169  * @pdev: Pointer to PCI device
18170  * @state: The current pci connection state
18171  *
18172  * This function is called after a PCI bus error affecting
18173  * this device has been detected.
18174  */
18175 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18176                                               pci_channel_state_t state)
18177 {
18178         struct net_device *netdev = pci_get_drvdata(pdev);
18179         struct tg3 *tp = netdev_priv(netdev);
18180         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18181
18182         netdev_info(netdev, "PCI I/O error detected\n");
18183
18184         rtnl_lock();
18185
18186         /* Could be second call or maybe we don't have netdev yet */
18187         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18188                 goto done;
18189
18190         /* We needn't recover from permanent error */
18191         if (state == pci_channel_io_frozen)
18192                 tp->pcierr_recovery = true;
18193
18194         tg3_phy_stop(tp);
18195
18196         tg3_netif_stop(tp);
18197
18198         tg3_timer_stop(tp);
18199
18200         /* Want to make sure that the reset task doesn't run */
18201         tg3_reset_task_cancel(tp);
18202
18203         netif_device_detach(netdev);
18204
18205         /* Clean up software state, even if MMIO is blocked */
18206         tg3_full_lock(tp, 0);
18207         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18208         tg3_full_unlock(tp);
18209
18210 done:
18211         if (state == pci_channel_io_perm_failure) {
18212                 if (netdev) {
18213                         tg3_napi_enable(tp);
18214                         dev_close(netdev);
18215                 }
18216                 err = PCI_ERS_RESULT_DISCONNECT;
18217         } else {
18218                 pci_disable_device(pdev);
18219         }
18220
18221         rtnl_unlock();
18222
18223         return err;
18224 }
18225
18226 /**
18227  * tg3_io_slot_reset - called after the pci bus has been reset.
18228  * @pdev: Pointer to PCI device
18229  *
18230  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
18232  * followed by fixups by BIOS, and has its config space
18233  * set up identically to what it was at cold boot.
18234  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Default to DISCONNECT; upgraded to RECOVERED on success */
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Reload the config space saved at probe time, then re-save it so
	 * a subsequent recovery starts from this restored state.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface down (or no netdev): nothing more to bring up */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	/* Recovery failed while the interface was up: give up and close it */
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
18274
18275 /**
18276  * tg3_io_resume - called when traffic can start flowing again.
18277  * @pdev: Pointer to PCI device
18278  *
18279  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
18281  */
18282 static void tg3_io_resume(struct pci_dev *pdev)
18283 {
18284         struct net_device *netdev = pci_get_drvdata(pdev);
18285         struct tg3 *tp = netdev_priv(netdev);
18286         int err;
18287
18288         rtnl_lock();
18289
18290         if (!netdev || !netif_running(netdev))
18291                 goto done;
18292
18293         tg3_full_lock(tp, 0);
18294         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18295         tg3_flag_set(tp, INIT_COMPLETE);
18296         err = tg3_restart_hw(tp, true);
18297         if (err) {
18298                 tg3_full_unlock(tp);
18299                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18300                 goto done;
18301         }
18302
18303         netif_device_attach(netdev);
18304
18305         tg3_timer_start(tp);
18306
18307         tg3_netif_start(tp);
18308
18309         tg3_full_unlock(tp);
18310
18311         tg3_phy_start(tp);
18312
18313 done:
18314         tp->pcierr_recovery = false;
18315         rtnl_unlock();
18316 }
18317
/* PCI error-recovery (AER) callbacks wired into the driver below */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
18323
/* PCI driver glue: probe/remove, AER recovery, PM ops and shutdown */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};
18333
/* Register tg3_driver on module load, unregister on unload */
module_pci_driver(tg3_driver);