/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 */
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/sched/signal.h>
20 #include <linux/types.h>
21 #include <linux/compiler.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/ioport.h>
27 #include <linux/pci.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/ethtool.h>
32 #include <linux/mdio.h>
33 #include <linux/mii.h>
34 #include <linux/phy.h>
35 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
44 #include <linux/ssb/ssb_driver_gige.h>
45 #include <linux/hwmon.h>
46 #include <linux/hwmon-sysfs.h>
47 #include <linux/crc32poly.h>
49 #include <net/checksum.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
56 #include <uapi/linux/net_tstamp.h>
57 #include <linux/ptp_clock_kernel.h>
64 /* Functions & macros to verify TG3_FLAGS types */
66 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
68 return test_bit(flag, bits);
71 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
76 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
78 clear_bit(flag, bits);
/* Convenience wrappers: token-paste the short flag name onto the
 * TG3_FLAG_ prefix and operate on tp->tg3_flags.
 */
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88 #define DRV_MODULE_NAME "tg3"
89 /* DO NOT UPDATE TG3_*_NUM defines */
91 #define TG3_MIN_NUM 137
93 #define RESET_KIND_SHUTDOWN 0
94 #define RESET_KIND_INIT 1
95 #define RESET_KIND_SUSPEND 2
97 #define TG3_DEF_RX_MODE 0
98 #define TG3_DEF_TX_MODE 0
99 #define TG3_DEF_MSG_ENABLE \
109 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
111 /* length of time before we decide the hardware is borked,
112 * and dev->tx_timeout() should be called to fix the problem
115 #define TG3_TX_TIMEOUT (5 * HZ)
117 /* hardware minimum and maximum for a single frame's data payload */
118 #define TG3_MIN_MTU ETH_ZLEN
119 #define TG3_MAX_MTU(tp) \
120 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122 /* These numbers seem to be hard coded in the NIC firmware somehow.
123 * You can't change the ring sizes, but you can change where you place
124 * them in the NIC onboard memory.
126 #define TG3_RX_STD_RING_SIZE(tp) \
127 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
128 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
129 #define TG3_DEF_RX_RING_PENDING 200
130 #define TG3_RX_JMB_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
133 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
135 /* Do not place this n-ring entries value into the tp struct itself,
136 * we really want to expose these constants to GCC so that modulo et
137 * al. operations are done with shifts and masks instead of with
138 * hw multiply/modulo instructions. Another solution would be to
139 * replace things like '% foo' with '& (foo - 1)'.
142 #define TG3_TX_RING_SIZE 512
143 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
145 #define TG3_RX_STD_RING_BYTES(tp) \
146 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
147 #define TG3_RX_JMB_RING_BYTES(tp) \
148 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
149 #define TG3_RX_RCB_RING_BYTES(tp) \
150 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
151 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
153 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
155 #define TG3_DMA_BYTE_ENAB 64
157 #define TG3_RX_STD_DMA_SZ 1536
158 #define TG3_RX_JMB_DMA_SZ 9046
160 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
162 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
163 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
165 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
166 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
168 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
171 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
172 * that are at least dword aligned when used in PCIX mode. The driver
173 * works around this bug by double copying the packet. This workaround
174 * is built into the normal double copy length check for efficiency.
176 * However, the double copy is only necessary on those architectures
177 * where unaligned memory accesses are inefficient. For those architectures
178 * where unaligned memory accesses incur little penalty, we can reintegrate
179 * the 5701 in the normal rx path. Doing so saves a device structure
180 * dereference by hardcoding the double copy threshold in place.
182 #define TG3_RX_COPY_THRESHOLD 256
183 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
184 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
186 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
189 #if (NET_IP_ALIGN != 0)
190 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
192 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
195 /* minimum number of free TX descriptors required to wake up TX process */
196 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
197 #define TG3_TX_BD_DMA_MAX_2K 2048
198 #define TG3_TX_BD_DMA_MAX_4K 4096
200 #define TG3_RAW_IP_ALIGN 2
202 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
203 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
205 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
206 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208 #define FIRMWARE_TG3 "/*(DEBLOBBED)*/"
209 #define FIRMWARE_TG357766 "/*(DEBLOBBED)*/"
210 #define FIRMWARE_TG3TSO "/*(DEBLOBBED)*/"
211 #define FIRMWARE_TG3TSO5 "/*(DEBLOBBED)*/"
213 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
214 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
215 MODULE_LICENSE("GPL");
218 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
219 module_param(tg3_debug, int, 0);
220 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
222 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
223 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
225 static const struct pci_device_id tg3_pci_tbl[] = {
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
245 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
246 TG3_DRV_DATA_FLAG_5705_10_100},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
248 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
249 TG3_DRV_DATA_FLAG_5705_10_100},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
252 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
253 TG3_DRV_DATA_FLAG_5705_10_100},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
274 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
275 PCI_VENDOR_ID_LENOVO,
276 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
277 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
280 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
299 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
300 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
301 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
302 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
303 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
304 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
308 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
318 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
333 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
334 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
336 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
337 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
338 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
339 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
340 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
344 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
346 static const struct {
347 const char string[ETH_GSTRING_LEN];
348 } ethtool_stats_keys[] = {
351 { "rx_ucast_packets" },
352 { "rx_mcast_packets" },
353 { "rx_bcast_packets" },
355 { "rx_align_errors" },
356 { "rx_xon_pause_rcvd" },
357 { "rx_xoff_pause_rcvd" },
358 { "rx_mac_ctrl_rcvd" },
359 { "rx_xoff_entered" },
360 { "rx_frame_too_long_errors" },
362 { "rx_undersize_packets" },
363 { "rx_in_length_errors" },
364 { "rx_out_length_errors" },
365 { "rx_64_or_less_octet_packets" },
366 { "rx_65_to_127_octet_packets" },
367 { "rx_128_to_255_octet_packets" },
368 { "rx_256_to_511_octet_packets" },
369 { "rx_512_to_1023_octet_packets" },
370 { "rx_1024_to_1522_octet_packets" },
371 { "rx_1523_to_2047_octet_packets" },
372 { "rx_2048_to_4095_octet_packets" },
373 { "rx_4096_to_8191_octet_packets" },
374 { "rx_8192_to_9022_octet_packets" },
381 { "tx_flow_control" },
383 { "tx_single_collisions" },
384 { "tx_mult_collisions" },
386 { "tx_excessive_collisions" },
387 { "tx_late_collisions" },
388 { "tx_collide_2times" },
389 { "tx_collide_3times" },
390 { "tx_collide_4times" },
391 { "tx_collide_5times" },
392 { "tx_collide_6times" },
393 { "tx_collide_7times" },
394 { "tx_collide_8times" },
395 { "tx_collide_9times" },
396 { "tx_collide_10times" },
397 { "tx_collide_11times" },
398 { "tx_collide_12times" },
399 { "tx_collide_13times" },
400 { "tx_collide_14times" },
401 { "tx_collide_15times" },
402 { "tx_ucast_packets" },
403 { "tx_mcast_packets" },
404 { "tx_bcast_packets" },
405 { "tx_carrier_sense_errors" },
409 { "dma_writeq_full" },
410 { "dma_write_prioq_full" },
414 { "rx_threshold_hit" },
416 { "dma_readq_full" },
417 { "dma_read_prioq_full" },
418 { "tx_comp_queue_full" },
420 { "ring_set_send_prod_index" },
421 { "ring_status_update" },
423 { "nic_avoided_irqs" },
424 { "nic_tx_threshold_hit" },
426 { "mbuf_lwm_thresh_hit" },
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

/* Indices into ethtool_test_keys[] / the ethtool self-test result array. */
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
440 static const struct {
441 const char string[ETH_GSTRING_LEN];
442 } ethtool_test_keys[] = {
443 [TG3_NVRAM_TEST] = { "nvram test (online) " },
444 [TG3_LINK_TEST] = { "link test (online) " },
445 [TG3_REGISTER_TEST] = { "register test (offline)" },
446 [TG3_MEMORY_TEST] = { "memory test (offline)" },
447 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
448 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
449 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
450 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
453 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
456 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
458 writel(val, tp->regs + off);
461 static u32 tg3_read32(struct tg3 *tp, u32 off)
463 return readl(tp->regs + off);
466 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
468 writel(val, tp->aperegs + off);
471 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
473 return readl(tp->aperegs + off);
476 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
486 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
488 writel(val, tp->regs + off);
489 readl(tp->regs + off);
492 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
497 spin_lock_irqsave(&tp->indirect_lock, flags);
498 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
499 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
500 spin_unlock_irqrestore(&tp->indirect_lock, flags);
504 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
508 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
509 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
510 TG3_64BIT_REG_LOW, val);
513 if (off == TG3_RX_STD_PROD_IDX_REG) {
514 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
515 TG3_64BIT_REG_LOW, val);
519 spin_lock_irqsave(&tp->indirect_lock, flags);
520 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
521 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
522 spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 /* In indirect mode when disabling interrupts, we also need
525 * to clear the interrupt bit in the GRC local ctrl register.
527 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
529 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
530 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
534 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
539 spin_lock_irqsave(&tp->indirect_lock, flags);
540 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
541 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
542 spin_unlock_irqrestore(&tp->indirect_lock, flags);
546 /* usec_wait specifies the wait time in usec when writing to certain registers
547 * where it is unsafe to read back the register without some delay.
548 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
549 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
551 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
553 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
554 /* Non-posted methods */
555 tp->write32(tp, off, val);
558 tg3_write32(tp, off, val);
563 /* Wait again after the read for the posted method to guarantee that
564 * the wait time is met.
570 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
572 tp->write32_mbox(tp, off, val);
573 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
574 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
575 !tg3_flag(tp, ICH_WORKAROUND)))
576 tp->read32_mbox(tp, off);
579 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
581 void __iomem *mbox = tp->regs + off;
583 if (tg3_flag(tp, TXD_MBOX_HWBUG))
585 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
586 tg3_flag(tp, FLUSH_POSTED_WRITES))
590 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
592 return readl(tp->regs + off + GRCMBOX_BASE);
595 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
597 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register access shorthands; all expect a local `tp` in scope and
 * dispatch through the per-chip accessor function pointers.
 */
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
611 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
615 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
616 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
619 spin_lock_irqsave(&tp->indirect_lock, flags);
620 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
621 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
622 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
624 /* Always leave this as zero. */
625 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
627 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
628 tw32_f(TG3PCI_MEM_WIN_DATA, val);
630 /* Always leave this as zero. */
631 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 spin_unlock_irqrestore(&tp->indirect_lock, flags);
636 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
640 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
641 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
646 spin_lock_irqsave(&tp->indirect_lock, flags);
647 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
648 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
649 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
651 /* Always leave this as zero. */
652 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
654 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
655 *val = tr32(TG3PCI_MEM_WIN_DATA);
657 /* Always leave this as zero. */
658 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 spin_unlock_irqrestore(&tp->indirect_lock, flags);
663 static void tg3_ape_lock_init(struct tg3 *tp)
668 if (tg3_asic_rev(tp) == ASIC_REV_5761)
669 regbase = TG3_APE_LOCK_GRANT;
671 regbase = TG3_APE_PER_LOCK_GRANT;
673 /* Make sure the driver hasn't any stale locks. */
674 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
676 case TG3_APE_LOCK_PHY0:
677 case TG3_APE_LOCK_PHY1:
678 case TG3_APE_LOCK_PHY2:
679 case TG3_APE_LOCK_PHY3:
680 bit = APE_LOCK_GRANT_DRIVER;
684 bit = APE_LOCK_GRANT_DRIVER;
686 bit = 1 << tp->pci_fn;
688 tg3_ape_write32(tp, regbase + 4 * i, bit);
693 static int tg3_ape_lock(struct tg3 *tp, int locknum)
697 u32 status, req, gnt, bit;
699 if (!tg3_flag(tp, ENABLE_APE))
703 case TG3_APE_LOCK_GPIO:
704 if (tg3_asic_rev(tp) == ASIC_REV_5761)
707 case TG3_APE_LOCK_GRC:
708 case TG3_APE_LOCK_MEM:
710 bit = APE_LOCK_REQ_DRIVER;
712 bit = 1 << tp->pci_fn;
714 case TG3_APE_LOCK_PHY0:
715 case TG3_APE_LOCK_PHY1:
716 case TG3_APE_LOCK_PHY2:
717 case TG3_APE_LOCK_PHY3:
718 bit = APE_LOCK_REQ_DRIVER;
724 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
725 req = TG3_APE_LOCK_REQ;
726 gnt = TG3_APE_LOCK_GRANT;
728 req = TG3_APE_PER_LOCK_REQ;
729 gnt = TG3_APE_PER_LOCK_GRANT;
734 tg3_ape_write32(tp, req + off, bit);
736 /* Wait for up to 1 millisecond to acquire lock. */
737 for (i = 0; i < 100; i++) {
738 status = tg3_ape_read32(tp, gnt + off);
741 if (pci_channel_offline(tp->pdev))
748 /* Revoke the lock request. */
749 tg3_ape_write32(tp, gnt + off, bit);
756 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
760 if (!tg3_flag(tp, ENABLE_APE))
764 case TG3_APE_LOCK_GPIO:
765 if (tg3_asic_rev(tp) == ASIC_REV_5761)
768 case TG3_APE_LOCK_GRC:
769 case TG3_APE_LOCK_MEM:
771 bit = APE_LOCK_GRANT_DRIVER;
773 bit = 1 << tp->pci_fn;
775 case TG3_APE_LOCK_PHY0:
776 case TG3_APE_LOCK_PHY1:
777 case TG3_APE_LOCK_PHY2:
778 case TG3_APE_LOCK_PHY3:
779 bit = APE_LOCK_GRANT_DRIVER;
785 if (tg3_asic_rev(tp) == ASIC_REV_5761)
786 gnt = TG3_APE_LOCK_GRANT;
788 gnt = TG3_APE_PER_LOCK_GRANT;
790 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
793 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
798 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
802 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811 return timeout_us ? 0 : -EBUSY;
814 #ifdef CONFIG_TIGON3_HWMON
815 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
819 for (i = 0; i < timeout_us / 10; i++) {
820 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
828 return i == timeout_us / 10;
831 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
835 u32 i, bufoff, msgoff, maxlen, apedata;
837 if (!tg3_flag(tp, APE_HAS_NCSI))
840 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
841 if (apedata != APE_SEG_SIG_MAGIC)
844 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
845 if (!(apedata & APE_FW_STATUS_READY))
848 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
850 msgoff = bufoff + 2 * sizeof(u32);
851 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856 /* Cap xfer sizes to scratchpad limits. */
857 length = (len > maxlen) ? maxlen : len;
860 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
861 if (!(apedata & APE_FW_STATUS_READY))
864 /* Wait for up to 1 msec for APE to service previous event. */
865 err = tg3_ape_event_lock(tp, 1000);
869 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
870 APE_EVENT_STATUS_SCRTCHPD_READ |
871 APE_EVENT_STATUS_EVENT_PENDING;
872 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
874 tg3_ape_write32(tp, bufoff, base_off);
875 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
877 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
878 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
882 if (tg3_ape_wait_for_event(tp, 30000))
885 for (i = 0; length; i += 4, length -= 4) {
886 u32 val = tg3_ape_read32(tp, msgoff + i);
887 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 20 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 20000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
934 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
935 APE_HOST_SEG_SIG_MAGIC);
936 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
937 APE_HOST_SEG_LEN_MAGIC);
938 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
939 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
940 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
941 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
942 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
943 APE_HOST_BEHAV_NO_PHYLOCK);
944 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
945 TG3_APE_HOST_DRVR_STATE_START);
947 event = APE_EVENT_STATUS_STATE_START;
949 case RESET_KIND_SHUTDOWN:
950 if (device_may_wakeup(&tp->pdev->dev) &&
951 tg3_flag(tp, WOL_ENABLE)) {
952 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
953 TG3_APE_HOST_WOL_SPEED_AUTO);
954 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
956 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
958 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
960 event = APE_EVENT_STATUS_STATE_UNLOAD;
966 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
968 tg3_ape_send_event(tp, event);
971 static void tg3_send_ape_heartbeat(struct tg3 *tp,
972 unsigned long interval)
974 /* Check if hb interval has exceeded */
975 if (!tg3_flag(tp, ENABLE_APE) ||
976 time_before(jiffies, tp->ape_hb_jiffies + interval))
979 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
980 tp->ape_hb_jiffies = jiffies;
983 static void tg3_disable_ints(struct tg3 *tp)
987 tw32(TG3PCI_MISC_HOST_CTRL,
988 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
989 for (i = 0; i < tp->irq_max; i++)
990 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
993 static void tg3_enable_ints(struct tg3 *tp)
1000 tw32(TG3PCI_MISC_HOST_CTRL,
1001 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1003 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1004 for (i = 0; i < tp->irq_cnt; i++) {
1005 struct tg3_napi *tnapi = &tp->napi[i];
1007 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 if (tg3_flag(tp, 1SHOT_MSI))
1009 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1011 tp->coal_now |= tnapi->coal_now;
1014 /* Force an initial interrupt */
1015 if (!tg3_flag(tp, TAGGED_STATUS) &&
1016 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1017 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1019 tw32(HOSTCC_MODE, tp->coal_now);
1021 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1024 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1026 struct tg3 *tp = tnapi->tp;
1027 struct tg3_hw_status *sblk = tnapi->hw_status;
1028 unsigned int work_exists = 0;
1030 /* check for phy events */
1031 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1032 if (sblk->status & SD_STATUS_LINK_CHG)
1036 /* check for TX work to do */
1037 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1040 /* check for RX work to do */
1041 if (tnapi->rx_rcb_prod_idx &&
1042 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1049 * similar to tg3_enable_ints, but it accurately determines whether there
1050 * is new work pending and can return without flushing the PIO write
1051 * which reenables interrupts
1053 static void tg3_int_reenable(struct tg3_napi *tnapi)
1055 struct tg3 *tp = tnapi->tp;
1057 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1059 /* When doing tagged status, this work check is unnecessary.
1060 * The last_tag we write above tells the chip which piece of
1061 * work we've completed.
1063 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1064 tw32(HOSTCC_MODE, tp->coalesce_mode |
1065 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the chip core clock source, stepping through ALTCLK as required
 * by 5705+ parts; a no-op for CPMU-present and 5780-class devices. Only
 * the CLKRUN-related bits are preserved into tp->pci_clock_ctrl.
 * NOTE(review): elided listing - the local "u32 clock_ctrl;" declaration,
 * early return, and some tw32_wait_f() argument lines are missing here.
 */
1068 static void tg3_switch_clocks(struct tg3 *tp)
1071 u32 orig_clock_ctrl;
1073 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1076 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1078 orig_clock_ctrl = clock_ctrl;
1079 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1080 CLOCK_CTRL_CLKRUN_OENABLE |
1082 tp->pci_clock_ctrl = clock_ctrl;
1084 if (tg3_flag(tp, 5705_PLUS)) {
1085 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1086 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1087 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1089 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1090 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1092 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1094 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1095 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1098 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1101 #define PHY_BUSY_LOOPS 5000
/* __tg3_readphy() - read PHY register @reg at MDIO address @phy_addr.
 * Temporarily disables MI auto-polling, takes the APE lock to serialize
 * with management firmware, issues an MI READ command and busy-waits (up
 * to PHY_BUSY_LOOPS iterations) for MI_COM_BUSY to clear before
 * extracting the 16-bit data. Returns 0 on success (per callers that
 * treat nonzero as failure); the result is stored through *val.
 * NOTE(review): elided listing - declarations, delays, the loops--/ret
 * bookkeeping and the final return are not visible in this excerpt.
 */
1103 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1110 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1112 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1116 tg3_ape_lock(tp, tp->phy_ape_lock);
1120 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1121 MI_COM_PHY_ADDR_MASK);
1122 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1123 MI_COM_REG_ADDR_MASK);
1124 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1126 tw32_f(MAC_MI_COM, frame_val);
1128 loops = PHY_BUSY_LOOPS;
1129 while (loops != 0) {
1131 frame_val = tr32(MAC_MI_COM);
1133 if ((frame_val & MI_COM_BUSY) == 0) {
1135 frame_val = tr32(MAC_MI_COM);
1143 *val = frame_val & MI_COM_DATA_MASK;
1147 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1148 tw32_f(MAC_MI_MODE, tp->mi_mode);
1152 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: read @reg on this device's default PHY address. */
1157 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1159 return __tg3_readphy(tp, tp->phy_addr, reg, val);
/* __tg3_writephy() - write @val to PHY register @reg at @phy_addr.
 * Mirrors __tg3_readphy(): MI auto-poll is suspended, the APE lock
 * serializes against management firmware, and the MI WRITE command is
 * busy-waited until MI_COM_BUSY clears. FET-class PHYs reject writes to
 * MII_CTRL1000 and MII_TG3_AUX_CTRL up front (registers they lack).
 * NOTE(review): elided listing - declarations, the early-return for the
 * FET case, delays, and the final return are not visible here.
 */
1162 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1169 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1170 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1173 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1175 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1179 tg3_ape_lock(tp, tp->phy_ape_lock);
1181 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1182 MI_COM_PHY_ADDR_MASK);
1183 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1184 MI_COM_REG_ADDR_MASK);
1185 frame_val |= (val & MI_COM_DATA_MASK);
1186 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1188 tw32_f(MAC_MI_COM, frame_val);
1190 loops = PHY_BUSY_LOOPS;
1191 while (loops != 0) {
1193 frame_val = tr32(MAC_MI_COM);
1194 if ((frame_val & MI_COM_BUSY) == 0) {
1196 frame_val = tr32(MAC_MI_COM);
1206 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1207 tw32_f(MAC_MI_MODE, tp->mi_mode);
1211 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: write @reg on this device's default PHY address. */
1216 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1218 return __tg3_writephy(tp, tp->phy_addr, reg, val);
/* Indirect clause-45 register write through the clause-22 MMD access
 * registers: select @devad/@addr, then write @val via the no-increment
 * data mode. Returns 0 on success or the first failing step's error.
 * NOTE(review): elided listing - the intermediate "if (err) ..." checks
 * and final "return err;" are not visible in this excerpt.
 */
1221 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1225 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1229 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1233 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1234 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1238 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
/* Indirect clause-45 register read; same MMD select sequence as
 * tg3_phy_cl45_write() but the final step reads the data into *val.
 */
1244 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1248 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1252 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1256 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1257 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1261 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read a DSP register: select it via the address register, then read the
 * paired read/write data port into *val.
 */
1267 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1271 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1273 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
/* Write a DSP register via the address register + data port pair. */
1278 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1282 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Read an AUXCTL shadow register: program the read-select field plus the
 * MISC shadow selector, then read the value back through MII_TG3_AUX_CTRL.
 */
1289 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1293 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1294 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1295 MII_TG3_AUXCTL_SHDWSEL_MISC);
1297 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
/* Write an AUXCTL shadow register; the MISC shadow additionally requires
 * the write-enable bit to take effect.
 */
1302 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1304 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1305 set |= MII_TG3_AUXCTL_MISC_WREN;
1307 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
/* Enable/disable the SMDSP clock via the AUXCTL shadow register using a
 * read-modify-write; the TX_6DB bit is always set on writeback.
 * NOTE(review): elided listing - the enable/disable branch keywords and
 * "return err;" lines are not visible in this excerpt.
 */
1310 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1315 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1321 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1325 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1326 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
/* Write a MISC shadow register: the selector, value, and write-enable
 * bit go out in a single MII_TG3_MISC_SHDW write.
 */
1331 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1333 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1334 reg | val | MII_TG3_MISC_SHDW_WREN);
/* Soft-reset the PHY by setting BMCR_RESET and polling BMCR until the
 * self-clearing bit drops (or the loop, elided here, times out).
 * NOTE(review): elided listing - the poll loop header, delay, error
 * checks and return statements are not visible in this excerpt.
 */
1337 static int tg3_bmcr_reset(struct tg3 *tp)
1342 /* OK, reset it, and poll the BMCR_RESET bit until it
1343 * clears or we time out.
1345 phy_control = BMCR_RESET;
1346 err = tg3_writephy(tp, MII_BMCR, phy_control);
1352 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1356 if ((phy_control & BMCR_RESET) == 0) {
/* mii_bus .read callback: perform the PHY read under tp->lock so MDIO
 * access is serialized with the rest of the driver.
 */
1368 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1370 struct tg3 *tp = bp->priv;
1373 spin_lock_bh(&tp->lock);
1375 if (__tg3_readphy(tp, mii_id, reg, &val))
1378 spin_unlock_bh(&tp->lock);
/* mii_bus .write callback: perform the PHY write under tp->lock. */
1383 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1385 struct tg3 *tp = bp->priv;
1388 spin_lock_bh(&tp->lock);
1390 if (__tg3_writephy(tp, mii_id, reg, val))
1393 spin_unlock_bh(&tp->lock);
/* Configure the 5785 MAC<->PHY interface for the attached PHY type:
 * pick per-PHY LED modes for MAC_PHYCFG2, then either the plain (non-
 * RGMII) clock-timeout setup or the full RGMII configuration of
 * MAC_PHYCFG1/MAC_PHYCFG2/MAC_EXT_RGMII_MODE, honoring the inband-
 * disable and external-inband RX/TX enable flags.
 * NOTE(review): elided listing - the switch's break/default lines and
 * the early return in the non-RGMII path are not visible here.
 */
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1401 struct phy_device *phydev;
1403 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1404 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405 case PHY_ID_BCM50610:
1406 case PHY_ID_BCM50610M:
1407 val = MAC_PHYCFG2_50610_LED_MODES;
1409 case PHY_ID_BCMAC131:
1410 val = MAC_PHYCFG2_AC131_LED_MODES;
1412 case PHY_ID_RTL8211C:
1413 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1415 case PHY_ID_RTL8201E:
1416 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1422 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423 tw32(MAC_PHYCFG2, val);
1425 val = tr32(MAC_PHYCFG1);
1426 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429 tw32(MAC_PHYCFG1, val);
1434 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436 MAC_PHYCFG2_FMODE_MASK_MASK |
1437 MAC_PHYCFG2_GMODE_MASK_MASK |
1438 MAC_PHYCFG2_ACT_MASK_MASK |
1439 MAC_PHYCFG2_QUAL_MASK_MASK |
1440 MAC_PHYCFG2_INBAND_ENABLE;
1442 tw32(MAC_PHYCFG2, val);
1444 val = tr32(MAC_PHYCFG1);
1445 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1453 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455 tw32(MAC_PHYCFG1, val);
1457 val = tr32(MAC_EXT_RGMII_MODE);
1458 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459 MAC_RGMII_MODE_RX_QUALITY |
1460 MAC_RGMII_MODE_RX_ACTIVITY |
1461 MAC_RGMII_MODE_RX_ENG_DET |
1462 MAC_RGMII_MODE_TX_ENABLE |
1463 MAC_RGMII_MODE_TX_LOWPWR |
1464 MAC_RGMII_MODE_TX_RESET);
1465 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 val |= MAC_RGMII_MODE_RX_INT_B |
1468 MAC_RGMII_MODE_RX_QUALITY |
1469 MAC_RGMII_MODE_RX_ACTIVITY |
1470 MAC_RGMII_MODE_RX_ENG_DET;
1471 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 val |= MAC_RGMII_MODE_TX_ENABLE |
1473 MAC_RGMII_MODE_TX_LOWPWR |
1474 MAC_RGMII_MODE_TX_RESET;
1476 tw32(MAC_EXT_RGMII_MODE, val);
/* Start MDIO: turn off MI auto-polling (software owns the MI interface),
 * and on 5785 re-apply the MAC/PHY interface configuration if the mdio
 * bus has already been initialized.
 */
1479 static void tg3_mdio_start(struct tg3 *tp)
1481 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482 tw32_f(MAC_MI_MODE, tp->mi_mode);
1485 if (tg3_flag(tp, MDIOBUS_INITED) &&
1486 tg3_asic_rev(tp) == ASIC_REV_5785)
1487 tg3_mdio_config_5785(tp);
/* Determine the PHY's MDIO address (per-function on 5717+, from the SSB
 * roboswitch on SSB cores, else the fixed TG3_PHY_MII_ADDR), then - when
 * phylib is in use - allocate and register an mii_bus, sanity-check the
 * PHY is powered, and apply per-PHY-ID dev_flags/interface fixups.
 * NOTE(review): elided listing - returns, break statements and some
 * error-path lines are not visible; "®" below is mojibake for "&reg"
 * in the original source (kept byte-identical on purpose).
 */
1490 static int tg3_mdio_init(struct tg3 *tp)
1494 struct phy_device *phydev;
1496 if (tg3_flag(tp, 5717_PLUS)) {
1499 tp->phy_addr = tp->pci_fn + 1;
1501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1504 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505 TG3_CPMU_PHY_STRAP_IS_SERDES;
1508 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1511 addr = ssb_gige_get_phyaddr(tp->pdev);
1514 tp->phy_addr = addr;
1516 tp->phy_addr = TG3_PHY_MII_ADDR;
1520 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1523 tp->mdio_bus = mdiobus_alloc();
1524 if (tp->mdio_bus == NULL)
1527 tp->mdio_bus->name = "tg3 mdio bus";
1528 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1529 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1530 tp->mdio_bus->priv = tp;
1531 tp->mdio_bus->parent = &tp->pdev->dev;
1532 tp->mdio_bus->read = &tg3_mdio_read;
1533 tp->mdio_bus->write = &tg3_mdio_write;
1534 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1536 /* The bus registration will look for all the PHYs on the mdio bus.
1537 * Unfortunately, it does not ensure the PHY is powered up before
1538 * accessing the PHY ID registers. A chip reset is the
1539 * quickest way to bring the device back to an operational state..
1541 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1544 i = mdiobus_register(tp->mdio_bus);
1546 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1547 mdiobus_free(tp->mdio_bus);
1551 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1553 if (!phydev || !phydev->drv) {
1554 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1555 mdiobus_unregister(tp->mdio_bus);
1556 mdiobus_free(tp->mdio_bus);
1560 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1561 case PHY_ID_BCM57780:
1562 phydev->interface = PHY_INTERFACE_MODE_GMII;
1563 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565 case PHY_ID_BCM50610:
1566 case PHY_ID_BCM50610M:
1567 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1568 PHY_BRCM_RX_REFCLK_UNUSED |
1569 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1570 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1572 case PHY_ID_RTL8211C:
1573 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 case PHY_ID_RTL8201E:
1576 case PHY_ID_BCMAC131:
1577 phydev->interface = PHY_INTERFACE_MODE_MII;
1578 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1583 tg3_flag_set(tp, MDIOBUS_INITED);
1585 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586 tg3_mdio_config_5785(tp);
/* Tear down the mdio bus if it was initialized; clears the flag first so
 * the teardown cannot be run twice.
 */
1591 static void tg3_mdio_fini(struct tg3 *tp)
1593 if (tg3_flag(tp, MDIOBUS_INITED)) {
1594 tg3_flag_clear(tp, MDIOBUS_INITED);
1595 mdiobus_unregister(tp->mdio_bus);
1596 mdiobus_free(tp->mdio_bus);
1600 /* tp->lock is held. */
/* Signal the ASF firmware (RX CPU) that a driver event is pending by
 * setting GRC_RX_CPU_DRIVER_EVENT, and stamp the time for the ack
 * timeout logic in tg3_wait_for_event_ack(). Caller holds tp->lock.
 */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1605 val = tr32(GRC_RX_CPU_EVENT);
1606 val |= GRC_RX_CPU_DRIVER_EVENT;
1607 tw32_f(GRC_RX_CPU_EVENT, val);
1609 tp->last_event_jiffies = jiffies;
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for firmware to ack the
 * previous driver event, i.e. for GRC_RX_CPU_DRIVER_EVENT to clear.
 * The wait is shortened by time already elapsed since the event was
 * generated, and abandoned if the PCI channel goes offline.
 * Caller holds tp->lock.
 * NOTE(review): elided listing - "long time_remain;", "int i;", the
 * early return, and the per-iteration udelay are not visible here.
 */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1618 unsigned int delay_cnt;
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain = (long)(tp->last_event_jiffies + 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 if (time_remain < 0)
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt = jiffies_to_usecs(time_remain);
1630 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1632 delay_cnt = (delay_cnt >> 3) + 1;
1634 for (i = 0; i < delay_cnt; i++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 if (pci_channel_offline(tp->pdev))
1644 /* tp->lock is held. */
/* Gather PHY registers (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 for
 * non-MII-serdes, PHYADDR) into four 32-bit words for the UMP link
 * report sent to management firmware. Caller holds tp->lock.
 * NOTE(review): elided listing - the "val = reg << 16;" style packing
 * and "data[n] = val;" stores are not visible; "®" is mojibake for
 * "&reg" in the original source (kept byte-identical on purpose).
 */
1645 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1650 if (!tg3_readphy(tp, MII_BMCR, ®))
1652 if (!tg3_readphy(tp, MII_BMSR, ®))
1653 val |= (reg & 0xffff);
1657 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1659 if (!tg3_readphy(tp, MII_LPA, ®))
1660 val |= (reg & 0xffff);
1664 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1665 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1667 if (!tg3_readphy(tp, MII_STAT1000, ®))
1668 val |= (reg & 0xffff);
1672 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1679 /* tp->lock is held. */
/* Report a link state change to ASF management firmware on 5780-class
 * chips: gather PHY data, wait for the previous event to be acked, write
 * the LINK_UPDATE command plus 14 bytes of data to the firmware command
 * mailbox, then raise the driver-event interrupt. Caller holds tp->lock.
 */
1680 static void tg3_ump_link_report(struct tg3 *tp)
1684 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1687 tg3_phy_gather_ump_data(tp, data);
1689 tg3_wait_for_event_ack(tp);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1695 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698 tg3_generate_fw_event(tp);
1701 /* tp->lock is held. */
/* Ask ASF firmware to pause (only when ASF is enabled and the APE is
 * not managing the device): ack the prior event, issue PAUSE_FW, raise
 * the event, and wait for the firmware's ack. Caller holds tp->lock.
 */
1702 static void tg3_stop_fw(struct tg3 *tp)
1704 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1705 /* Wait for RX cpu to ACK the previous event. */
1706 tg3_wait_for_event_ack(tp);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710 tg3_generate_fw_event(tp);
1712 /* Wait for RX cpu to ACK this event. */
1713 tg3_wait_for_event_ack(tp);
1717 /* tp->lock is held. */
/* Before a chip reset: write the firmware mailbox magic, and with the
 * new ASF handshake also record the driver state (START/UNLOAD/SUSPEND)
 * matching the reset @kind. Caller holds tp->lock.
 * NOTE(review): elided listing - the switch header, DRV_STATE_* value
 * lines and break statements are not visible in this excerpt.
 */
1718 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1721 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725 case RESET_KIND_INIT:
1726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 case RESET_KIND_SHUTDOWN:
1731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 case RESET_KIND_SUSPEND:
1736 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 /* tp->lock is held. */
/* After a chip reset with the new ASF handshake: record the *_DONE
 * driver state matching the reset @kind. Caller holds tp->lock.
 */
1747 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 case RESET_KIND_INIT:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_START_DONE);
1756 case RESET_KIND_SHUTDOWN:
1757 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758 DRV_STATE_UNLOAD_DONE);
1767 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signature: record the driver state
 * matching the reset @kind when ASF is enabled. Caller holds tp->lock.
 */
1768 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 if (tg3_flag(tp, ENABLE_ASF)) {
1772 case RESET_KIND_INIT:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 case RESET_KIND_SHUTDOWN:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SUSPEND:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for bootcode/firmware to finish after reset. 5906 parts poll
 * VCPU_STATUS for init-done (up to ~20ms); others poll the firmware
 * mailbox for the inverted magic value. A timeout is NOT an error -
 * some Sun onboard parts ship without firmware - but it is logged once
 * via the NO_FWARE_REPORTED flag. 57765 A0 gets extra settle time.
 * NOTE(review): elided listing - returns, delays and some closing
 * braces are not visible in this excerpt.
 */
1793 static int tg3_poll_fw(struct tg3 *tp)
1798 if (tg3_flag(tp, NO_FWARE_REPORTED))
1801 if (tg3_flag(tp, IS_SSB_CORE)) {
1802 /* We don't use firmware. */
1806 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1807 /* Wait up to 20ms for init done. */
1808 for (i = 0; i < 200; i++) {
1809 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 if (pci_channel_offline(tp->pdev))
1819 /* Wait for firmware initialization to complete. */
1820 for (i = 0; i < 100000; i++) {
1821 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1822 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1824 if (pci_channel_offline(tp->pdev)) {
1825 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1826 tg3_flag_set(tp, NO_FWARE_REPORTED);
1827 netdev_info(tp->dev, "No firmware running\n");
1836 /* Chip might not be fitted with firmware. Some Sun onboard
1837 * parts are configured like that. So don't signal the timeout
1838 * of the above loop as an error, but do report the lack of
1839 * running firmware once.
1841 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1842 tg3_flag_set(tp, NO_FWARE_REPORTED);
1844 netdev_info(tp->dev, "No firmware running\n");
1847 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1848 /* The 57765 A0 needs a little more
1849 * time to do some important work.
/* Log the current link state (down, or up with speed/duplex and flow
 * control settings, plus EEE state when capable), forward it to
 * management firmware via tg3_ump_link_report(), and cache carrier
 * state in tp->link_up.
 */
1857 static void tg3_link_report(struct tg3 *tp)
1859 if (!netif_carrier_ok(tp->dev)) {
1860 netif_info(tp, link, tp->dev, "Link is down\n");
1861 tg3_ump_link_report(tp);
1862 } else if (netif_msg_link(tp)) {
1863 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1864 (tp->link_config.active_speed == SPEED_1000 ?
1866 (tp->link_config.active_speed == SPEED_100 ?
1868 (tp->link_config.active_duplex == DUPLEX_FULL ?
1871 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1872 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1877 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1878 netdev_info(tp->dev, "EEE is %s\n",
1879 tp->setlpicnt ? "enabled" : "disabled");
1881 tg3_ump_link_report(tp);
1884 tp->link_up = netif_carrier_ok(tp->dev);
/* Decode 1000BASE-T (clause-22 ADVERTISE) pause bits into FLOW_CTRL_RX/
 * FLOW_CTRL_TX flags per the 802.3 pause resolution convention.
 */
1887 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1891 if (adv & ADVERTISE_PAUSE_CAP) {
1892 flowctrl |= FLOW_CTRL_RX;
1893 if (!(adv & ADVERTISE_PAUSE_ASYM))
1894 flowctrl |= FLOW_CTRL_TX;
1895 } else if (adv & ADVERTISE_PAUSE_ASYM)
1896 flowctrl |= FLOW_CTRL_TX;
/* Encode FLOW_CTRL_TX/RX flags into the 1000BASE-X pause advertisement
 * bits (symmetric pause and/or asymmetric-pause-toward-link-partner).
 */
1901 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1905 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1906 miireg = ADVERTISE_1000XPAUSE;
1907 else if (flow_ctrl & FLOW_CTRL_TX)
1908 miireg = ADVERTISE_1000XPSE_ASYM;
1909 else if (flow_ctrl & FLOW_CTRL_RX)
1910 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Decode 1000BASE-X pause advertisement bits into FLOW_CTRL_RX/TX
 * flags; 1000BASE-X counterpart of tg3_decode_flowctrl_1000T().
 */
1917 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1921 if (adv & ADVERTISE_1000XPAUSE) {
1922 flowctrl |= FLOW_CTRL_RX;
1923 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1924 flowctrl |= FLOW_CTRL_TX;
1925 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1926 flowctrl |= FLOW_CTRL_TX;
/* Resolve negotiated 1000BASE-X flow control from local and remote
 * pause advertisements: symmetric pause if both advertise it, otherwise
 * asymmetric pause direction depends on which side advertises PAUSE.
 * NOTE(review): elided listing - the "cap = FLOW_CTRL_RX/TX;" lines for
 * the asymmetric cases and the return are not visible here.
 */
1931 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1935 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1936 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1937 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1938 if (lcladv & ADVERTISE_1000XPAUSE)
1940 if (rmtadv & ADVERTISE_1000XPAUSE)
/* Resolve the active flow-control configuration - from autoneg results
 * (serdes vs copper resolution helpers) when pause autoneg is on, else
 * from the static link_config - and program the RX/TX flow-control
 * enable bits into MAC_RX_MODE/MAC_TX_MODE, writing each register only
 * when its value actually changed.
 */
1947 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1951 u32 old_rx_mode = tp->rx_mode;
1952 u32 old_tx_mode = tp->tx_mode;
1954 if (tg3_flag(tp, USE_PHYLIB))
1955 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1957 autoneg = tp->link_config.autoneg;
1959 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1960 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1961 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965 flowctrl = tp->link_config.flowctrl;
1967 tp->link_config.active_flowctrl = flowctrl;
1969 if (flowctrl & FLOW_CTRL_RX)
1970 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1974 if (old_rx_mode != tp->rx_mode)
1975 tw32_f(MAC_RX_MODE, tp->rx_mode);
1977 if (flowctrl & FLOW_CTRL_TX)
1978 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982 if (old_tx_mode != tp->tx_mode)
1983 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib adjust_link callback: under tp->lock, derive the MAC port mode
 * and duplex from the PHY state, resolve flow control (when autoneg),
 * reprogram MAC_MODE / MAC_MI_STAT / MAC_TX_LENGTHS as needed, and cache
 * the new link/speed/duplex. If anything changed, report the link state
 * after dropping the lock.
 * NOTE(review): elided listing - the "if (phydev->link)" framing and
 * "linkmesg = 1;" assignments are not visible in this excerpt.
 */
1986 static void tg3_adjust_link(struct net_device *dev)
1988 u8 oldflowctrl, linkmesg = 0;
1989 u32 mac_mode, lcl_adv, rmt_adv;
1990 struct tg3 *tp = netdev_priv(dev);
1991 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1993 spin_lock_bh(&tp->lock);
1995 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1996 MAC_MODE_HALF_DUPLEX);
1998 oldflowctrl = tp->link_config.active_flowctrl;
2004 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2005 mac_mode |= MAC_MODE_PORT_MODE_MII;
2006 else if (phydev->speed == SPEED_1000 ||
2007 tg3_asic_rev(tp) != ASIC_REV_5785)
2008 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2010 mac_mode |= MAC_MODE_PORT_MODE_MII;
2012 if (phydev->duplex == DUPLEX_HALF)
2013 mac_mode |= MAC_MODE_HALF_DUPLEX;
2015 lcl_adv = mii_advertise_flowctrl(
2016 tp->link_config.flowctrl);
2019 rmt_adv = LPA_PAUSE_CAP;
2020 if (phydev->asym_pause)
2021 rmt_adv |= LPA_PAUSE_ASYM;
2024 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2026 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 if (mac_mode != tp->mac_mode) {
2029 tp->mac_mode = mac_mode;
2030 tw32_f(MAC_MODE, tp->mac_mode);
2034 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2035 if (phydev->speed == SPEED_10)
2037 MAC_MI_STAT_10MBPS_MODE |
2038 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2040 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2043 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2044 tw32(MAC_TX_LENGTHS,
2045 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2046 (6 << TX_LENGTHS_IPG_SHIFT) |
2047 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2049 tw32(MAC_TX_LENGTHS,
2050 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2051 (6 << TX_LENGTHS_IPG_SHIFT) |
2052 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2054 if (phydev->link != tp->old_link ||
2055 phydev->speed != tp->link_config.active_speed ||
2056 phydev->duplex != tp->link_config.active_duplex ||
2057 oldflowctrl != tp->link_config.active_flowctrl)
2060 tp->old_link = phydev->link;
2061 tp->link_config.active_speed = phydev->speed;
2062 tp->link_config.active_duplex = phydev->duplex;
2064 spin_unlock_bh(&tp->lock);
2067 tg3_link_report(tp);
/* Connect the MAC to its PHY via phylib: attach with tg3_adjust_link as
 * the link-change callback, then clamp the advertised max speed/pause
 * support to what the MAC supports for the PHY's interface mode.
 * Returns 0 on success, -errno on attach failure or unsupported mode.
 * NOTE(review): elided listing - the early return when already
 * connected, the default: case, and returns are not visible here.
 */
2070 static int tg3_phy_init(struct tg3 *tp)
2072 struct phy_device *phydev;
2074 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2077 /* Bring the PHY back to a known state. */
2080 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2082 /* Attach the MAC to the PHY. */
2083 phydev = phy_connect(tp->dev, phydev_name(phydev),
2084 tg3_adjust_link, phydev->interface);
2085 if (IS_ERR(phydev)) {
2086 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2087 return PTR_ERR(phydev);
2090 /* Mask with MAC supported features. */
2091 switch (phydev->interface) {
2092 case PHY_INTERFACE_MODE_GMII:
2093 case PHY_INTERFACE_MODE_RGMII:
2094 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2095 phy_set_max_speed(phydev, SPEED_1000);
2096 phy_support_asym_pause(phydev);
2100 case PHY_INTERFACE_MODE_MII:
2101 phy_set_max_speed(phydev, SPEED_100);
2102 phy_support_asym_pause(phydev);
2105 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2109 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2111 phy_attached_info(phydev);
/* (Re)start the connected PHY: when coming out of low-power, restore the
 * speed/duplex/autoneg/advertising saved in link_config, then kick off
 * autonegotiation. No-op if the PHY is not connected.
 */
2116 static void tg3_phy_start(struct tg3 *tp)
2118 struct phy_device *phydev;
2120 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2123 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2125 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2126 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2127 phydev->speed = tp->link_config.speed;
2128 phydev->duplex = tp->link_config.duplex;
2129 phydev->autoneg = tp->link_config.autoneg;
2130 ethtool_convert_legacy_u32_to_link_mode(
2131 phydev->advertising, tp->link_config.advertising);
2136 phy_start_aneg(phydev);
/* Stop the PHY state machine; no-op if the PHY is not connected. */
2139 static void tg3_phy_stop(struct tg3 *tp)
2141 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
/* Disconnect from the PHY and clear the connected flag. */
2147 static void tg3_phy_fini(struct tg3 *tp)
2149 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2150 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2151 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable external loopback in the AUXCTL shadow register. FET PHYs are
 * skipped; the 5401 cannot do read-modify-write so it gets a blind
 * write, all others read-modify-write the EXTLOOPBK bit.
 * NOTE(review): elided listing - the 5401's extra OR'd constant, goto
 * done, and "return err;" are not visible in this excerpt.
 */
2155 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2160 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2164 /* Cannot do read-modify-write on 5401 */
2165 err = tg3_phy_auxctl_write(tp,
2166 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2167 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2172 err = tg3_phy_auxctl_read(tp,
2173 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2177 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
/* Toggle auto power-down on FET-class PHYs through the FET shadow
 * register window: open the window via MII_TG3_FET_TEST, flip the APD
 * bit in AUXSTAT2, then restore the test register to close the window.
 */
2185 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2189 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192 tg3_writephy(tp, MII_TG3_FET_TEST,
2193 phytest | MII_TG3_FET_SHADOW_EN);
2194 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2196 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2198 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2201 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Toggle PHY auto power-down. Only on 5705+ (and not on 5717+ MII
 * serdes). FET PHYs use their shadow-window variant; others program the
 * SCR5 and APD misc shadow registers (84ms wake timer, DLL APD kept on
 * except when enabling on 5784).
 * NOTE(review): elided listing - the early return and the
 * "if (!enable ...) return;" guard are not visible in this excerpt.
 */
2205 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2209 if (!tg3_flag(tp, 5705_PLUS) ||
2210 (tg3_flag(tp, 5717_PLUS) &&
2211 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2215 tg3_phy_fet_toggle_apd(tp, enable);
2219 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2220 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2221 MII_TG3_MISC_SHDW_SCR5_SDTL |
2222 MII_TG3_MISC_SHDW_SCR5_C125OE;
2223 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2224 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2226 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2229 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2231 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2233 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
/* Toggle automatic MDI crossover. Only on 5705+ copper PHYs. FET PHYs
 * flip the MDIX bit in the MISCCTRL shadow (via the FET test window);
 * others read-modify-write FORCE_AMDIX in the AUXCTL MISC shadow.
 */
2236 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2240 if (!tg3_flag(tp, 5705_PLUS) ||
2241 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2244 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2247 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2248 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2250 tg3_writephy(tp, MII_TG3_FET_TEST,
2251 ephy | MII_TG3_FET_SHADOW_EN);
2252 if (!tg3_readphy(tp, reg, &phy)) {
2254 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2256 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2257 tg3_writephy(tp, reg, phy);
2259 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2264 ret = tg3_phy_auxctl_read(tp,
2265 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2268 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2270 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2271 tg3_phy_auxctl_write(tp,
2272 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Enable ethernet@wirespeed in the AUXCTL MISC shadow register, unless
 * the PHY is flagged as not supporting it.
 */
2277 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2282 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2285 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2287 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2288 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Program PHY DSP tuning registers (TAP1, AADJ1CH0/3, EXP75/96/97) from
 * fields unpacked out of the chip's OTP word, bracketed by enabling and
 * disabling the SMDSP clock.
 * NOTE(review): elided listing - the "u32 otp, phy;" declaration and the
 * "if (!tp->phy_otp) return; otp = tp->phy_otp;" guard are not visible.
 */
2291 static void tg3_phy_apply_otp(struct tg3 *tp)
2300 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2303 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2304 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2305 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2307 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2308 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2309 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2311 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2312 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2313 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2315 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2316 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2318 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2319 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2321 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2322 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2323 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2325 tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Pull the current EEE state out of the PHY (clause-45 AN registers) and
 * CPMU registers into an ethtool_eee structure: eee_active from the
 * resolution status, lp_advertised/advertised/eee_enabled from the AN
 * ability registers, tx_lpi_enabled and the LPI timer from the CPMU.
 * NOTE(review): elided listing - the @eee parameter redirect
 * ("if (eee) dest = eee;" in upstream tg3) is not visible here; as
 * shown, results always land in &tp->eee. Confirm against full source.
 */
2328 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2331 struct ethtool_eee *dest = &tp->eee;
2333 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2339 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2342 /* Pull eee_active */
2343 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2344 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2345 dest->eee_active = 1;
2347 dest->eee_active = 0;
2349 /* Pull lp advertised settings */
2350 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2352 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2354 /* Pull advertised and eee_enabled settings */
2355 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2357 dest->eee_enabled = !!val;
2358 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2360 /* Pull tx_lpi_enabled */
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2364 /* Pull lpi timer value */
2365 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
/* Adjust EEE after a link change: on a full-duplex 100/1000 autoneg
 * link, program the LPI exit timing and refresh the pulled EEE state;
 * when EEE did not become active, clear the DSP TAP26 knob (link up)
 * and disable LPI generation in the CPMU.
 * NOTE(review): elided listing - tp->setlpicnt updates and the early
 * return are not visible in this excerpt.
 */
2368 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2372 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2377 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2379 tp->link_config.active_duplex == DUPLEX_FULL &&
2380 (tp->link_config.active_speed == SPEED_100 ||
2381 tp->link_config.active_speed == SPEED_1000)) {
2384 if (tp->link_config.active_speed == SPEED_1000)
2385 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2387 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2389 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2391 tg3_eee_pull_config(tp, NULL);
2392 if (tp->eee.eee_active)
2396 if (!tp->setlpicnt) {
2397 if (current_link_up &&
2398 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2399 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2400 tg3_phy_toggle_auxctl_smdsp(tp, false);
2403 val = tr32(TG3_CPMU_EEE_MODE);
2404 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Enable EEE LPI generation: on gigabit links for 5717/5719/57765-class
 * chips first program the ALNOKO/RMRXSTO DSP workaround bits, then set
 * the LPI enable bit in the CPMU EEE mode register.
 */
2408 static void tg3_phy_eee_enable(struct tg3 *tp)
2412 if (tp->link_config.active_speed == SPEED_1000 &&
2413 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2414 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2415 tg3_flag(tp, 57765_CLASS)) &&
2416 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417 val = MII_TG3_DSP_TAP26_ALNOKO |
2418 MII_TG3_DSP_TAP26_RMRXSTO;
2419 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2420 tg3_phy_toggle_auxctl_smdsp(tp, false);
2423 val = tr32(TG3_CPMU_EEE_MODE);
2424 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll the DSP control register until its busy bit (0x1000) clears.
 * NOTE(review): elided listing - the loop header, timeout handling and
 * return statements are not visible in this excerpt.
 */
2427 static int tg3_wait_macro_done(struct tg3 *tp)
2434 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2435 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels, read
 * it back, and verify it. On mismatch, poke the recovery sequence into
 * the DSP and request a PHY reset through *resetp. Part of the
 * 5703/4/5 PHY reset workaround.
 * NOTE(review): elided listing - "*resetp = 1; return -EBUSY;" error
 * exits and the final "return 0;" are not visible in this excerpt.
 */
2445 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2447 static const u32 test_pat[4][6] = {
2448 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2449 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2450 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2451 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2455 for (chan = 0; chan < 4; chan++) {
2458 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2459 (chan * 0x2000) | 0x0200);
2460 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2462 for (i = 0; i < 6; i++)
2463 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2466 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2467 if (tg3_wait_macro_done(tp)) {
2472 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 (chan * 0x2000) | 0x0200);
2474 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2475 if (tg3_wait_macro_done(tp)) {
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2481 if (tg3_wait_macro_done(tp)) {
2486 for (i = 0; i < 6; i += 2) {
2489 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2490 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2491 tg3_wait_macro_done(tp)) {
2497 if (low != test_pat[chan][i] ||
2498 high != test_pat[chan][i+1]) {
2499 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2500 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Clear the test pattern in all 4 DSP channels (write six zero words per
 * channel and wait for the macro to complete).
 */
2511 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2515 for (chan = 0; chan < 4; chan++) {
2518 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2519 (chan * 0x2000) | 0x0200);
2520 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2521 for (i = 0; i < 6; i++)
2522 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2523 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2524 if (tg3_wait_macro_done(tp))
/* PHY reset workaround for 5703/5704/5705: optionally BMCR-reset, force
 * 1000/full master mode, run the DSP test-pattern write/verify loop
 * (retrying with a fresh reset on failure), then clear the channel
 * patterns and restore MII_CTRL1000 and the transmitter/interrupt
 * enable in MII_TG3_EXT_CTRL.
 * NOTE(review): elided listing - retry-counter setup, error returns and
 * some masking lines are not visible; "®32" is mojibake for "&reg32"
 * in the original source (kept byte-identical on purpose).
 */
2531 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2533 u32 reg32, phy9_orig;
2534 int retries, do_phy_reset, err;
2540 err = tg3_bmcr_reset(tp);
2546 /* Disable transmitter and interrupt. */
2547 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2551 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2553 /* Set full-duplex, 1000 mbps. */
2554 tg3_writephy(tp, MII_BMCR,
2555 BMCR_FULLDPLX | BMCR_SPEED1000);
2557 /* Set to master mode. */
2558 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2561 tg3_writephy(tp, MII_CTRL1000,
2562 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2564 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2568 /* Block the PHY control access. */
2569 tg3_phydsp_write(tp, 0x8005, 0x0800);
2571 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2574 } while (--retries);
2576 err = tg3_phy_reset_chanpat(tp);
2580 tg3_phydsp_write(tp, 0x8005, 0x0000);
2582 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2583 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2585 tg3_phy_toggle_auxctl_smdsp(tp, false);
2587 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2589 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
2594 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Mark the netdev carrier off and cache the state in tp->link_up. */
2599 static void tg3_carrier_off(struct tg3 *tp)
2601 netif_carrier_off(tp->dev);
2602 tp->link_up = false;
/* When ASF management is enabled, warn that a pending PHY settings
 * change will briefly interrupt management side-band traffic.
 */
2605 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2607 if (tg3_flag(tp, ENABLE_ASF))
2608 netdev_warn(tp->dev,
2609 "Management side-band traffic will be interrupted during phy settings change\n");
2612 /* This will reset the tigon3 PHY if there is no valid
2613 * link unless the FORCE argument is non-zero.
/* Full PHY reset with per-chip workarounds: wakes the 5906 EPHY out
 * of IDDQ, reports link loss, runs the 5703/4/5 reset sequence where
 * needed, handles CPMU 10MB-RX-only and 1000MB MAC-clock quirks,
 * applies OTP / APD / DSP errata fixes, then re-enables jumbo-frame
 * bits, auto-MDIX and wirespeed.
 * NOTE(review): extract is missing interior lines (error returns,
 * several closing braces).
 */
2615 static int tg3_phy_reset(struct tg3 *tp)
2620 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2621 val = tr32(GRC_MISC_CFG);
2622 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* Double BMSR read: the first read returns latched status. */
2625 err = tg3_readphy(tp, MII_BMSR, &val);
2626 err |= tg3_readphy(tp, MII_BMSR, &val);
2630 if (netif_running(tp->dev) && tp->link_up) {
2631 netif_carrier_off(tp->dev);
2632 tg3_link_report(tp);
2635 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2636 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2637 tg3_asic_rev(tp) == ASIC_REV_5705) {
2638 err = tg3_phy_reset_5703_4_5(tp);
/* Temporarily clear the CPMU 10MB-RX-only mode around the BMCR reset
 * on 5784 (non-AX), restoring the DSP EXP8 bits afterwards. */
2645 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2646 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2647 cpmuctrl = tr32(TG3_CPMU_CTRL);
2648 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2650 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2653 err = tg3_bmcr_reset(tp);
2657 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2658 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2659 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2661 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5784-AX / 5761-AX: take the 1000MB MAC clock out of 12.5 MHz mode. */
2664 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2665 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2666 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2667 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2668 CPMU_LSPD_1000MB_MACCLK_12_5) {
2669 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2671 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2675 if (tg3_flag(tp, 5717_PLUS) &&
2676 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2679 tg3_phy_apply_otp(tp);
2681 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2682 tg3_phy_toggle_apd(tp, true);
2684 tg3_phy_toggle_apd(tp, false);
/* Errata DSP writes for ADC / BER / jitter bugs, bracketed by the
 * aux-control shadow-DSP enable/disable. */
2687 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2688 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2689 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2690 tg3_phydsp_write(tp, 0x000a, 0x0323);
2691 tg3_phy_toggle_auxctl_smdsp(tp, false);
2694 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2695 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2696 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2699 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2700 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 tg3_phydsp_write(tp, 0x000a, 0x310b);
2702 tg3_phydsp_write(tp, 0x201f, 0x9506);
2703 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2707 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2709 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2710 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2711 tg3_writephy(tp, MII_TG3_TEST1,
2712 MII_TG3_TEST1_TRIM_EN | 0x4);
2714 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2716 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 /* Set Extended packet length bit (bit 14) on all chips that */
2721 /* support jumbo frames */
2722 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2723 /* Cannot do read-modify-write on 5401 */
2724 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2725 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2726 /* Set bit 14 with read-modify-write to preserve other bits */
2727 err = tg3_phy_auxctl_read(tp,
2728 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2730 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2731 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2734 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2735 * jumbo frames transmission.
2737 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2739 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2740 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2743 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2744 /* adjust output voltage */
2745 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2748 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2749 tg3_phydsp_write(tp, 0xffb, 0x4000);
2751 tg3_phy_toggle_automdix(tp, true);
2752 tg3_phy_set_wirespeed(tp);
2756 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2757 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2758 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2759 TG3_GPIO_MSG_NEED_VAUX)
2760 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2761 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2762 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2763 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2764 (TG3_GPIO_MSG_DRVR_PRES << 12))
2766 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2767 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2768 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2769 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2770 (TG3_GPIO_MSG_NEED_VAUX << 12))
/* Update this PCI function's 4-bit GPIO status field (driver-present /
 * need-vaux) in the shared status word — held in the APE GPIO_MSG
 * register on 5717/5719, otherwise in TG3_CPMU_DRV_STATUS — and return
 * the combined status of all functions (shifted down past the APE
 * message offset).
 */
2772 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2776 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2777 tg3_asic_rev(tp) == ASIC_REV_5719)
2778 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2780 status = tr32(TG3_CPMU_DRV_STATUS);
2782 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2783 status &= ~(TG3_GPIO_MSG_MASK << shift);
2784 status |= (newstat << shift);
2786 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2787 tg3_asic_rev(tp) == ASIC_REV_5719)
2788 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2790 tw32(TG3_CPMU_DRV_STATUS, status);
2792 return status >> TG3_APE_GPIO_MSG_SHIFT;
/* Switch the NIC's power source back to Vmain.  On 5717/5719/5720 the
 * transition is serialized via the APE GPIO lock and advertised by
 * setting DRVR_PRES in the shared function-status word; all paths then
 * pulse GRC_LOCAL_CTRL with the power-switch settle delay.
 * NOTE(review): early-return lines are missing from this extract.
 */
2795 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2797 if (!tg3_flag(tp, IS_NIC))
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2802 tg3_asic_rev(tp) == ASIC_REV_5720) {
2803 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2806 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2809 TG3_GRC_LCLCTL_PWRSW_DELAY);
2811 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Leave the device powered from Vmain on shutdown by stepping GPIO1
 * (OE1 / OUTPUT1) through a three-write sequence, each with the
 * power-switch settle delay.  No-op for non-NIC boards and 5700/5701.
 */
2820 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2824 if (!tg3_flag(tp, IS_NIC) ||
2825 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2826 tg3_asic_rev(tp) == ASIC_REV_5701)
2829 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2831 tw32_wait_f(GRC_LOCAL_CTRL,
2832 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 tw32_wait_f(GRC_LOCAL_CTRL,
2837 TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch NIC power to the auxiliary (Vaux) source via GRC GPIO
 * sequencing.  Three board families are handled: 5700/5701 (single
 * combined write), 5761 non-e (GPIO0/GPIO2 swapped), and everything
 * else (with a 5714 over-current workaround and a 5753 no-GPIO2
 * restriction).  NOTE(review): some interior lines are missing from
 * this extract (e.g. the tail of the 5761 initializer at 2865/2867).
 */
2844 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2846 if (!tg3_flag(tp, IS_NIC))
2849 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2850 tg3_asic_rev(tp) == ASIC_REV_5701) {
2851 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2852 (GRC_LCLCTRL_GPIO_OE0 |
2853 GRC_LCLCTRL_GPIO_OE1 |
2854 GRC_LCLCTRL_GPIO_OE2 |
2855 GRC_LCLCTRL_GPIO_OUTPUT0 |
2856 GRC_LCLCTRL_GPIO_OUTPUT1),
2857 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2860 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2861 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2862 GRC_LCLCTRL_GPIO_OE1 |
2863 GRC_LCLCTRL_GPIO_OE2 |
2864 GRC_LCLCTRL_GPIO_OUTPUT0 |
2865 GRC_LCLCTRL_GPIO_OUTPUT1 |
2867 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2868 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2872 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2875 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2876 TG3_GRC_LCLCTL_PWRSW_DELAY);
2879 u32 grc_local_ctrl = 0;
2881 /* Workaround to prevent overdrawing Amps. */
2882 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2884 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 /* On 5753 and variants, GPIO2 cannot be used. */
2890 no_gpio2 = tp->nic_sram_data_cfg &
2891 NIC_SRAM_DATA_CFG_NO_GPIO2;
2893 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2894 GRC_LCLCTRL_GPIO_OE1 |
2895 GRC_LCLCTRL_GPIO_OE2 |
2896 GRC_LCLCTRL_GPIO_OUTPUT1 |
2897 GRC_LCLCTRL_GPIO_OUTPUT2;
2899 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2900 GRC_LCLCTRL_GPIO_OUTPUT2);
2902 tw32_wait_f(GRC_LOCAL_CTRL,
2903 tp->grc_local_ctrl | grc_local_ctrl,
2904 TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2908 tw32_wait_f(GRC_LOCAL_CTRL,
2909 tp->grc_local_ctrl | grc_local_ctrl,
2910 TG3_GRC_LCLCTL_PWRSW_DELAY);
2913 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2914 tw32_wait_f(GRC_LOCAL_CTRL,
2915 tp->grc_local_ctrl | grc_local_ctrl,
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* 5717-class auxiliary power arbitration: under the APE GPIO lock,
 * publish this function's NEED_VAUX/driver-present status, then switch
 * to Vaux if any function needs it, else park on Vmain.  Bails out if
 * another driver instance is still present.
 */
2921 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2925 /* Serialize power state transitions */
2926 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2929 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2930 msg = TG3_GPIO_MSG_NEED_VAUX;
2932 msg = tg3_set_function_status(tp, msg);
2934 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2937 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2938 tg3_pwrsrc_switch_to_vaux(tp);
2940 tg3_pwrsrc_die_with_vmain(tp);
2943 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide whether this device (or its peer function on dual-port
 * 5704-style boards) needs auxiliary power — WoL (optionally) or ASF —
 * and switch the power source accordingly.  5717/5719/5720 delegate to
 * tg3_frob_aux_power_5717(); 57765-class GPIOs are left alone.
 */
2946 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2948 bool need_vaux = false;
2950 /* The GPIOs do something completely different on 57765. */
2951 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2954 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2955 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2956 tg3_asic_rev(tp) == ASIC_REV_5720) {
2957 tg3_frob_aux_power_5717(tp, include_wol ?
2958 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2962 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2963 struct net_device *dev_peer;
2965 dev_peer = pci_get_drvdata(tp->pdev_peer);
2967 /* remove_one() may have been run on the peer. */
2969 struct tg3 *tp_peer = netdev_priv(dev_peer);
2971 if (tg3_flag(tp_peer, INIT_COMPLETE))
2974 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2975 tg3_flag(tp_peer, ENABLE_ASF))
2980 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2981 tg3_flag(tp, ENABLE_ASF))
2985 tg3_pwrsrc_switch_to_vaux(tp);
2987 tg3_pwrsrc_die_with_vmain(tp);
/* Determine the 5700 link LED polarity for the given speed, special-
 * casing LED mode PHY_2 and the BCM5411 PHY.  NOTE(review): return
 * statements are missing from this extract.
 */
2990 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2992 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2994 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2995 if (speed != SPEED_10)
2997 } else if (speed == SPEED_10)
/* Report whether this chip revision must not power down its PHY
 * (serdes-related hardware bugs).  NOTE(review): most switch cases and
 * returns are missing from this extract.
 */
3003 static bool tg3_phy_power_bug(struct tg3 *tp)
3005 switch (tg3_asic_rev(tp)) {
3010 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3019 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
/* Report whether forcing the LED off during PHY power-down would
 * trigger a hardware bug on this chip.  NOTE(review): switch cases and
 * returns are missing from this extract.
 */
3028 static bool tg3_phy_led_bug(struct tg3 *tp)
3030 switch (tg3_asic_rev(tp)) {
3033 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
/* Put the PHY into its lowest safe power state.  Serdes, 5906 EPHY,
 * FET-style, and copper PHYs each get their own sequence; chips with
 * known power-down bugs skip the final BMCR_PDOWN.
 * NOTE(review): several interior lines are missing from this extract.
 */
3042 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3046 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3049 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3050 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3051 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3052 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3055 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3056 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3057 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3062 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3064 val = tr32(GRC_MISC_CFG);
3065 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3068 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3070 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3073 tg3_writephy(tp, MII_ADVERTISE, 0);
3074 tg3_writephy(tp, MII_BMCR,
3075 BMCR_ANENABLE | BMCR_ANRESTART);
3077 tg3_writephy(tp, MII_TG3_FET_TEST,
3078 phytest | MII_TG3_FET_SHADOW_EN);
3079 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3080 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3082 MII_TG3_FET_SHDW_AUXMODE4,
3085 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3088 } else if (do_low_power) {
3089 if (!tg3_phy_led_bug(tp))
3090 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3091 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3093 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3094 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3095 MII_TG3_AUXCTL_PCTL_VREG_11V;
3096 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3099 /* The PHY should not be powered down on some chips because
3102 if (tg3_phy_power_bug(tp))
/* 5784-AX / 5761-AX: drop the 1000MB MAC clock to 12.5 MHz first. */
3105 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3106 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3107 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3108 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3109 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3110 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3113 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3116 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock (SWARB REQ/GNT
 * handshake, polled up to 8000 iterations) and bump the recursion
 * count.  No-op beyond the flag check if the chip has no NVRAM.
 * NOTE(review): timeout/error-return lines are missing from this
 * extract.
 */
3117 static int tg3_nvram_lock(struct tg3 *tp)
3119 if (tg3_flag(tp, NVRAM)) {
3122 if (tp->nvram_lock_cnt == 0) {
3123 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3124 for (i = 0; i < 8000; i++) {
3125 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3130 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3134 tp->nvram_lock_cnt++;
3139 /* tp->lock is held. */
/* Release one level of the NVRAM arbitration lock; the SWARB request
 * is cleared only when the recursion count reaches zero.
 */
3140 static void tg3_nvram_unlock(struct tg3 *tp)
3142 if (tg3_flag(tp, NVRAM)) {
3143 if (tp->nvram_lock_cnt > 0)
3144 tp->nvram_lock_cnt--;
3145 if (tp->nvram_lock_cnt == 0)
3146 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 /* tp->lock is held. */
/* Set the NVRAM ACCESS_ENABLE bit on 5750+ chips whose NVRAM is not
 * write-protected by the PROTECTED_NVRAM flag.
 */
3151 static void tg3_enable_nvram_access(struct tg3 *tp)
3153 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154 u32 nvaccess = tr32(NVRAM_ACCESS);
3156 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3160 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear ACCESS_ENABLE. */
3161 static void tg3_disable_nvram_access(struct tg3 *tp)
3163 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3164 u32 nvaccess = tr32(NVRAM_ACCESS);
3166 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from the legacy SEEPROM interface: program the
 * GRC EEPROM address register with a READ+START command, poll up to
 * 1000 iterations for COMPLETE, then fetch GRC_EEPROM_DATA.  Offset
 * must be dword-aligned and within the address mask.
 * NOTE(review): error returns and the final byteswap/store lines are
 * missing from this extract.
 */
3170 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3171 u32 offset, u32 *val)
3176 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3179 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3180 EEPROM_ADDR_DEVID_MASK |
3182 tw32(GRC_EEPROM_ADDR,
3184 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3185 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3186 EEPROM_ADDR_ADDR_MASK) |
3187 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3189 for (i = 0; i < 1000; i++) {
3190 tmp = tr32(GRC_EEPROM_ADDR);
3192 if (tmp & EEPROM_ADDR_COMPLETE)
3196 if (!(tmp & EEPROM_ADDR_COMPLETE))
3199 tmp = tr32(GRC_EEPROM_DATA);
3202 * The data will always be opposite the native endian
3203 * format. Perform a blind byteswap to compensate.
3210 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll (10-40 us per iteration, up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE; -EBUSY on timeout.
 * NOTE(review): the break and return lines are missing from this
 * extract.
 */
3212 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3216 tw32(NVRAM_CMD, nvram_cmd);
3217 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3218 usleep_range(10, 40);
3219 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3225 if (i == NVRAM_CMD_TIMEOUT)
/* Translate a logical NVRAM offset to the Atmel AT45DB0x1B physical
 * page/offset layout when the flash requires address translation;
 * otherwise the address passes through unchanged.
 */
3231 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3233 if (tg3_flag(tp, NVRAM) &&
3234 tg3_flag(tp, NVRAM_BUFFERED) &&
3235 tg3_flag(tp, FLASH) &&
3236 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3237 (tp->nvram_jedecnum == JEDEC_ATMEL))
3239 addr = ((addr / tp->nvram_pagesize) <<
3240 ATMEL_AT45DB0X1B_PAGE_POS) +
3241 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel page/offset
 * physical address back to a flat logical offset.
 */
3246 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3248 if (tg3_flag(tp, NVRAM) &&
3249 tg3_flag(tp, NVRAM_BUFFERED) &&
3250 tg3_flag(tp, FLASH) &&
3251 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252 (tp->nvram_jedecnum == JEDEC_ATMEL))
3254 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3255 tp->nvram_pagesize) +
3256 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3261 /* NOTE: Data read in from NVRAM is byteswapped according to
3262 * the byteswapping settings for all other register accesses.
3263 * tg3 devices are BE devices, so on a BE machine, the data
3264 * returned will be exactly as it is seen in NVRAM. On a LE
3265 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word: fall back to the SEEPROM path when there is no
 * NVRAM block, otherwise translate the address, take the arbitration
 * lock, enable access, execute a single-word RD command and return
 * NVRAM_RDDATA.  NOTE(review): error-return lines are missing from
 * this extract.
 */
3267 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3271 if (!tg3_flag(tp, NVRAM))
3272 return tg3_nvram_read_using_eeprom(tp, offset, val);
3274 offset = tg3_nvram_phys_addr(tp, offset);
3276 if (offset > NVRAM_ADDR_MSK)
3279 ret = tg3_nvram_lock(tp);
3283 tg3_enable_nvram_access(tp);
3285 tw32(NVRAM_ADDR, offset);
3286 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3287 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3290 *val = tr32(NVRAM_RDDATA);
3292 tg3_disable_nvram_access(tp);
3294 tg3_nvram_unlock(tp);
3299 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that stores the word as big-endian
 * (bytestream) order.  NOTE(review): the return line is missing from
 * this extract.
 */
3300 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3303 int res = tg3_nvram_read(tp, offset, &v);
3305 *val = cpu_to_be32(v);
/* Write a buffer to the legacy SEEPROM a word at a time: load
 * GRC_EEPROM_DATA (byteswapped to match the read path), clear the
 * previous COMPLETE bit, start the write, and poll up to 1000
 * iterations for completion per word.  NOTE(review): per-word address
 * setup and error-return lines are missing from this extract.
 */
3309 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3310 u32 offset, u32 len, u8 *buf)
3315 for (i = 0; i < len; i += 4) {
3321 memcpy(&data, buf + i, 4);
3324 * The SEEPROM interface expects the data to always be opposite
3325 * the native endian format. We accomplish this by reversing
3326 * all the operations that would have been performed on the
3327 * data from a call to tg3_nvram_read_be32().
3329 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3331 val = tr32(GRC_EEPROM_ADDR);
3332 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3334 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3336 tw32(GRC_EEPROM_ADDR, val |
3337 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3338 (addr & EEPROM_ADDR_ADDR_MASK) |
3342 for (j = 0; j < 1000; j++) {
3343 val = tr32(GRC_EEPROM_ADDR);
3345 if (val & EEPROM_ADDR_COMPLETE)
3349 if (!(val & EEPROM_ADDR_COMPLETE)) {
3358 /* offset and length are dword aligned */
/* Read-modify-write path for unbuffered flash: for each affected page,
 * read the whole page into a temporary buffer, merge in the caller's
 * data, issue write-enable, page-erase, write-enable again, then write
 * the page back word by word (FIRST on word 0, LAST on the final
 * word), finishing with a write-disable.  NOTE(review): loop headers,
 * size computation and cleanup lines are missing from this extract.
 */
3359 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3363 u32 pagesize = tp->nvram_pagesize;
3364 u32 pagemask = pagesize - 1;
3368 tmp = kmalloc(pagesize, GFP_KERNEL);
3374 u32 phy_addr, page_off, size;
3376 phy_addr = offset & ~pagemask;
3378 for (j = 0; j < pagesize; j += 4) {
3379 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3380 (__be32 *) (tmp + j));
3387 page_off = offset & pagemask;
3394 memcpy(tmp + page_off, buf, size);
3396 offset = offset + (pagesize - page_off);
3398 tg3_enable_nvram_access(tp);
3401 * Before we can erase the flash page, we need
3402 * to issue a special "write enable" command.
3404 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3406 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3409 /* Erase the target page */
3410 tw32(NVRAM_ADDR, phy_addr);
3412 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3413 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3415 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3418 /* Issue another write enable to start the write. */
3419 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3421 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424 for (j = 0; j < pagesize; j += 4) {
3427 data = *((__be32 *) (tmp + j));
3429 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3431 tw32(NVRAM_ADDR, phy_addr + j);
3433 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3437 nvram_cmd |= NVRAM_CMD_FIRST;
3438 else if (j == (pagesize - 4))
3439 nvram_cmd |= NVRAM_CMD_LAST;
3441 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3449 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450 tg3_nvram_exec_cmd(tp, nvram_cmd);
3457 /* offset and length are dword aligned */
/* Direct write path for buffered flash / EEPROM: write one word per
 * command, setting FIRST at each page start (and at i==0), LAST at
 * each page end, issuing a ST-specific write-enable on page starts for
 * pre-5752 chips, and forcing FIRST|LAST for non-flash EEPROM parts.
 * NOTE(review): some interior lines (final-word LAST handling, error
 * returns) are missing from this extract.
 */
3458 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3463 for (i = 0; i < len; i += 4, offset += 4) {
3464 u32 page_off, phy_addr, nvram_cmd;
3467 memcpy(&data, buf + i, 4);
3468 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3470 page_off = offset % tp->nvram_pagesize;
3472 phy_addr = tg3_nvram_phys_addr(tp, offset);
3474 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3476 if (page_off == 0 || i == 0)
3477 nvram_cmd |= NVRAM_CMD_FIRST;
3478 if (page_off == (tp->nvram_pagesize - 4))
3479 nvram_cmd |= NVRAM_CMD_LAST;
3482 nvram_cmd |= NVRAM_CMD_LAST;
3484 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3485 !tg3_flag(tp, FLASH) ||
3486 !tg3_flag(tp, 57765_PLUS))
3487 tw32(NVRAM_ADDR, phy_addr);
3489 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3490 !tg3_flag(tp, 5755_PLUS) &&
3491 (tp->nvram_jedecnum == JEDEC_ST) &&
3492 (nvram_cmd & NVRAM_CMD_FIRST)) {
3495 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3496 ret = tg3_nvram_exec_cmd(tp, cmd);
3500 if (!tg3_flag(tp, FLASH)) {
3501 /* We always do complete word writes to eeprom. */
3502 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3505 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3512 /* offset and length are dword aligned */
/* Top-level NVRAM write: temporarily drop the GPIO write-protect line
 * if present, dispatch to the SEEPROM path when there is no NVRAM
 * block, otherwise lock/enable NVRAM, set GRC_MODE write-enable,
 * choose the buffered or unbuffered writer, then undo everything in
 * reverse order and restore write protection.
 */
3513 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3517 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3518 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3519 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3523 if (!tg3_flag(tp, NVRAM)) {
3524 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3528 ret = tg3_nvram_lock(tp);
3532 tg3_enable_nvram_access(tp);
3533 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3534 tw32(NVRAM_WRITE1, 0x406);
3536 grc_mode = tr32(GRC_MODE);
3537 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3539 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3540 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3543 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3547 grc_mode = tr32(GRC_MODE);
3548 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3550 tg3_disable_nvram_access(tp);
3551 tg3_nvram_unlock(tp);
3554 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3555 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3562 #define RX_CPU_SCRATCH_BASE 0x30000
3563 #define RX_CPU_SCRATCH_SIZE 0x04000
3564 #define TX_CPU_SCRATCH_BASE 0x34000
3565 #define TX_CPU_SCRATCH_SIZE 0x04000
3567 /* tp->lock is held. */
/* Repeatedly request CPU_MODE_HALT (up to 10000 iterations) until the
 * internal RX/TX CPU reports halted; -EBUSY on timeout.  Bails out
 * early if the PCI channel has gone offline (surprise removal).
 */
3568 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3571 const int iters = 10000;
3573 for (i = 0; i < iters; i++) {
3574 tw32(cpu_base + CPU_STATE, 0xffffffff);
3575 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3576 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3578 if (pci_channel_offline(tp->pdev))
3582 return (i == iters) ? -EBUSY : 0;
3585 /* tp->lock is held. */
/* Pause the RX CPU, then re-assert the halt with a flushed write.
 * NOTE(review): the return line is missing from this extract.
 */
3586 static int tg3_rxcpu_pause(struct tg3 *tp)
3588 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3590 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3597 /* tp->lock is held. */
/* Pause the TX CPU (thin wrapper over tg3_pause_cpu()). */
3598 static int tg3_txcpu_pause(struct tg3 *tp)
3600 return tg3_pause_cpu(tp, TX_CPU_BASE);
3603 /* tp->lock is held. */
/* Release an internal CPU from halt: clear its state then zero
 * CPU_MODE with a flushed write.
 */
3604 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3606 tw32(cpu_base + CPU_STATE, 0xffffffff);
3607 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3610 /* tp->lock is held. */
/* Resume the RX CPU (thin wrapper over tg3_resume_cpu()). */
3611 static void tg3_rxcpu_resume(struct tg3 *tp)
3613 tg3_resume_cpu(tp, RX_CPU_BASE);
3616 /* tp->lock is held. */
/* Halt the RX or TX CPU.  5906 uses the VCPU halt bit instead; TX CPU
 * halts are invalid on 5705+ (BUG_ON) and skipped on SSB cores, which
 * only have an RX CPU.  On success the firmware's NVRAM arbitration
 * request is cleared.  NOTE(review): error-return lines are missing
 * from this extract.
 */
3617 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3621 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3623 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3624 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3626 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3629 if (cpu_base == RX_CPU_BASE) {
3630 rc = tg3_rxcpu_pause(tp);
3633 * There is only an Rx CPU for the 5750 derivative in the
3636 if (tg3_flag(tp, IS_SSB_CORE))
3639 rc = tg3_txcpu_pause(tp);
3643 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3644 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3648 /* Clear firmware's nvram arbitration. */
3649 if (tg3_flag(tp, NVRAM))
3650 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Return the number of 32-bit data words in a firmware image or
 * fragment.  Non-fragmented images derive the length from the total
 * blob size; fragmented images (main header len == 0xffffffff) use the
 * per-fragment header length — both minus the header itself.
 */
3654 static int tg3_fw_data_len(struct tg3 *tp,
3655 const struct tg3_firmware_hdr *fw_hdr)
3659 /* Non fragmented firmware have one firmware header followed by a
3660 * contiguous chunk of data to be written. The length field in that
3661 * header is not the length of data to be written but the complete
3662 * length of the bss. The data length is determined based on
3663 * tp->fw->size minus headers.
3665 * Fragmented firmware have a main header followed by multiple
3666 * fragments. Each fragment is identical to non fragmented firmware
3667 * with a firmware header followed by a contiguous chunk of data. In
3668 * the main header, the length field is unused and set to 0xffffffff.
3669 * In each fragment header the length is the entire size of that
3670 * fragment i.e. fragment data + header length. Data length is
3671 * therefore length field in the header minus TG3_FW_HDR_LEN.
3673 if (tp->fw_len == 0xffffffff)
3674 fw_len = be32_to_cpu(fw_hdr->len);
3676 fw_len = tp->fw->size;
3678 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3681 /* tp->lock is held. */
/* Load a firmware image into an internal CPU's scratch memory: reject
 * TX-CPU loads on 5705+, pick direct vs indirect register writes,
 * halt the CPU (under the NVRAM lock, except on 57766 where the boot
 * code handshakes differently), zero the scratch area, then copy each
 * fragment's words to base_addr within scratch.  NOTE(review): error
 * returns and the final return are missing from this extract.
 */
3682 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3683 u32 cpu_scratch_base, int cpu_scratch_size,
3684 const struct tg3_firmware_hdr *fw_hdr)
3687 void (*write_op)(struct tg3 *, u32, u32);
3688 int total_len = tp->fw->size;
3690 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3692 "%s: Trying to load TX cpu firmware which is 5705\n",
3697 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3698 write_op = tg3_write_mem;
3700 write_op = tg3_write_indirect_reg32;
3702 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3703 /* It is possible that bootcode is still loading at this point.
3704 * Get the nvram lock first before halting the cpu.
3706 int lock_err = tg3_nvram_lock(tp);
3707 err = tg3_halt_cpu(tp, cpu_base);
3709 tg3_nvram_unlock(tp);
3713 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3714 write_op(tp, cpu_scratch_base + i, 0);
3715 tw32(cpu_base + CPU_STATE, 0xffffffff);
3716 tw32(cpu_base + CPU_MODE,
3717 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3719 /* Subtract additional main header for fragmented firmware and
3720 * advance to the first fragment
3722 total_len -= TG3_FW_HDR_LEN;
3727 u32 *fw_data = (u32 *)(fw_hdr + 1);
3728 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3729 write_op(tp, cpu_scratch_base +
3730 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3732 be32_to_cpu(fw_data[i]));
3734 total_len -= be32_to_cpu(fw_hdr->len);
3736 /* Advance to next fragment */
3737 fw_hdr = (struct tg3_firmware_hdr *)
3738 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3739 } while (total_len > 0);
3747 /* tp->lock is held. */
/* Set an internal CPU's program counter, retrying up to 5 times with a
 * halt in between until the PC reads back correctly; -EBUSY if it
 * never sticks.
 */
3748 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3751 const int iters = 5;
3753 tw32(cpu_base + CPU_STATE, 0xffffffff);
3754 tw32_f(cpu_base + CPU_PC, pc);
3756 for (i = 0; i < iters; i++) {
3757 if (tr32(cpu_base + CPU_PC) == pc)
3759 tw32(cpu_base + CPU_STATE, 0xffffffff);
3760 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3761 tw32_f(cpu_base + CPU_PC, pc);
3765 return (i == iters) ? -EBUSY : 0;
3768 /* tp->lock is held. */
/* Load the 5701 A0 errata firmware into both RX and TX CPU scratch,
 * point the RX CPU's PC at the firmware entry and resume only the RX
 * CPU.  NOTE(review): error-return lines are missing from this
 * extract.
 */
3769 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3771 const struct tg3_firmware_hdr *fw_hdr;
3774 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3776 /* Firmware blob starts with version numbers, followed by
3777 start address and length. We are setting complete length.
3778 length = end_address_of_bss - start_address_of_text.
3779 Remainder is the blob to be loaded contiguously
3780 from start address. */
3782 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3783 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3788 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3789 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3794 /* Now startup only the RX cpu. */
3795 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3796 be32_to_cpu(fw_hdr->base_addr));
3798 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3799 "should be %08x\n", __func__,
3800 tr32(RX_CPU_BASE + CPU_PC),
3801 be32_to_cpu(fw_hdr->base_addr));
3805 tg3_rxcpu_resume(tp);
/* Check the 57766 RX CPU is ready for a service patch: poll (up to
 * 1000 iterations) for the boot code to enter its service loop, then
 * refuse to proceed if the firmware-handshake register shows another
 * patch already present.  NOTE(review): delay and return lines are
 * missing from this extract.
 */
3810 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3812 const int iters = 1000;
3816 /* Wait for boot code to complete initialization and enter service
3817 * loop. It is then safe to download service patches
3819 for (i = 0; i < iters; i++) {
3820 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3827 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3831 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3833 netdev_warn(tp->dev,
3834 "Other patches exist. Not downloading EEE patch\n");
3841 /* tp->lock is held. */
/* Download the 57766 EEE service patch: only on NVRAM-less parts, only
 * when the RX CPU state validates, and only when the blob's base
 * address matches TG3_57766_FW_BASE_ADDR.  Pauses the RX CPU around
 * the fragmented-firmware load and resumes it afterwards.
 */
3842 static void tg3_load_57766_firmware(struct tg3 *tp)
3844 struct tg3_firmware_hdr *fw_hdr;
3846 if (!tg3_flag(tp, NO_NVRAM))
3849 if (tg3_validate_rxcpu_state(tp))
3855 /* This firmware blob has a different format than older firmware
3856 * releases as given below. The main difference is we have fragmented
3857 * data to be written to non-contiguous locations.
3859 * In the beginning we have a firmware header identical to other
3860 * firmware which consists of version, base addr and length. The length
3861 * here is unused and set to 0xffffffff.
3863 * This is followed by a series of firmware fragments which are
3864 * individually identical to previous firmware. i.e. they have the
3865 * firmware header and followed by data for that fragment. The version
3866 * field of the individual fragment header is unused.
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3873 if (tg3_rxcpu_pause(tp))
3876 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3877 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3879 tg3_rxcpu_resume(tp);
3882 /* tp->lock is held. */
/* Load the TSO offload firmware (only when FW_TSO is set): on 5705 it
 * lives in RX-CPU-addressable mbuf pool SRAM, otherwise in TX CPU
 * scratch.  Sets the CPU PC to the entry point and resumes that CPU.
 * NOTE(review): error-return lines are missing from this extract.
 */
3883 static int tg3_load_tso_firmware(struct tg3 *tp)
3885 const struct tg3_firmware_hdr *fw_hdr;
3886 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3889 if (!tg3_flag(tp, FW_TSO))
3892 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3894 /* Firmware blob starts with version numbers, followed by
3895 start address and length. We are setting complete length.
3896 length = end_address_of_bss - start_address_of_text.
3897 Remainder is the blob to be loaded contiguously
3898 from start address. */
3900 cpu_scratch_size = tp->fw_len;
3902 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3903 cpu_base = RX_CPU_BASE;
3904 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3906 cpu_base = TX_CPU_BASE;
3907 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3908 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3911 err = tg3_load_firmware_cpu(tp, cpu_base,
3912 cpu_scratch_base, cpu_scratch_size,
3917 /* Now startup the cpu. */
3918 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3919 be32_to_cpu(fw_hdr->base_addr));
3922 "%s fails to set CPU PC, is %08x should be %08x\n",
3923 __func__, tr32(cpu_base + CPU_PC),
3924 be32_to_cpu(fw_hdr->base_addr));
3928 tg3_resume_cpu(tp, cpu_base);
3932 /* tp->lock is held. */
/* Program one MAC address slot: pack the 6 bytes into high (2 bytes)
 * and low (4 bytes) words and write both the primary MAC_ADDR_n and
 * the extended MAC_EXTADDR_n register pairs for the given index.
 * NOTE(review): the index-guard lines between the two register writes
 * are missing from this extract.
 */
3933 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3936 u32 addr_high, addr_low;
3938 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3939 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3940 (mac_addr[4] << 8) | mac_addr[5]);
3943 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3944 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3947 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3948 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3952 /* tp->lock is held. */
/* Program the device MAC address into slots 0-3 (optionally leaving
 * slot 1 alone), into slots 4-15 as well on 5703/5704, and seed the TX
 * backoff register with the byte-sum of the address.
 */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3958 for (i = 0; i < 4; i++) {
3959 if (i == 1 && skip_mac_1)
3961 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3964 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3965 tg3_asic_rev(tp) == ASIC_REV_5704) {
3966 for (i = 4; i < 16; i++)
3967 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3970 addr_high = (tp->dev->dev_addr[0] +
3971 tp->dev->dev_addr[1] +
3972 tp->dev->dev_addr[2] +
3973 tp->dev->dev_addr[3] +
3974 tp->dev->dev_addr[4] +
3975 tp->dev->dev_addr[5]) &
3976 TX_BACKOFF_SEED_MASK;
3977 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Rewrite TG3PCI_MISC_HOST_CTRL so register accesses (indirect or
 * otherwise) work after a power-state transition.
 */
3980 static void tg3_enable_register_access(struct tg3 *tp)
3983 * Make sure register accesses (indirect or otherwise) will function
3986 pci_write_config_dword(tp->pdev,
3987 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device to full power: re-enable register access, move the
 * PCI device to D0, and on success switch the NIC power source to
 * Vmain.  NOTE(review): the success branch/return structure is
 * truncated in this extract.
 */
3990 static int tg3_power_up(struct tg3 *tp)
3994 tg3_enable_register_access(tp);
3996 err = pci_set_power_state(tp->pdev, PCI_D0);
3998 /* Switch out of Vaux if it is a NIC */
3999 tg3_pwrsrc_switch_to_vmain(tp);
4001 netdev_err(tp->dev, "Transition to D0 failed\n");
4007 static int tg3_setup_phy(struct tg3 *, bool);
/*
 * tg3_power_down_prepare - quiesce the device before entering a low
 * power state: mask PCI interrupts, reconfigure the PHY for the chosen
 * WoL speed (via phylib or direct MII writes), set up MAC mode / clock
 * control for wake-on-LAN, and notify firmware/APE of the shutdown.
 * NOTE(review): listing is elided — statement order below is register-
 * critical and must not be rearranged; code kept byte-identical.
 */
4009 static int tg3_power_down_prepare(struct tg3 *tp)
4012 bool device_should_wake, do_low_power;
4014 tg3_enable_register_access(tp);
4016 /* Restore the CLKREQ setting. */
4017 if (tg3_flag(tp, CLKREQ_BUG))
4018 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4019 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Mask PCI interrupts while powering down. */
4021 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4022 tw32(TG3PCI_MISC_HOST_CTRL,
4023 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4025 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4026 tg3_flag(tp, WOL_ENABLE);
/* phylib-managed PHY: capture current link config, then restrict
 * advertisement to the WoL-capable speeds and restart autoneg. */
4028 if (tg3_flag(tp, USE_PHYLIB)) {
4029 do_low_power = false;
4030 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4031 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4032 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4033 struct phy_device *phydev;
4036 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4038 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4040 tp->link_config.speed = phydev->speed;
4041 tp->link_config.duplex = phydev->duplex;
4042 tp->link_config.autoneg = phydev->autoneg;
4043 ethtool_convert_link_mode_to_legacy_u32(
4044 &tp->link_config.advertising,
4045 phydev->advertising);
4047 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4048 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4050 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4052 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4055 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4056 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4057 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4059 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4069 linkmode_copy(phydev->advertising, advertising);
4070 phy_start_aneg(phydev);
/* Certain Broadcom PHY OUIs need the full low-power sequence. */
4072 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 if (phyid != PHY_ID_BCMAC131) {
4074 phyid &= PHY_BCM_OUI_MASK;
4075 if (phyid == PHY_BCM_OUI_1 ||
4076 phyid == PHY_BCM_OUI_2 ||
4077 phyid == PHY_BCM_OUI_3)
4078 do_low_power = true;
4082 do_low_power = true;
4084 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4087 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 tg3_setup_phy(tp, false);
4091 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4094 val = tr32(GRC_VCPU_EXT_CTRL);
4095 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 } else if (!tg3_flag(tp, ENABLE_ASF)) {
/* Poll firmware mailbox for the magic shutdown handshake. */
4100 for (i = 0; i < 200; i++) {
4101 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4107 if (tg3_flag(tp, WOL_CAP))
4108 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 WOL_DRV_STATE_SHUTDOWN |
/* Configure MAC mode and RX so a magic packet can wake us. */
4113 if (device_should_wake) {
4116 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4118 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 tg3_phy_auxctl_write(tp,
4120 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4127 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 else if (tp->phy_flags &
4130 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 if (tp->link_config.active_speed == SPEED_1000)
4132 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134 mac_mode = MAC_MODE_PORT_MODE_MII;
4136 mac_mode = MAC_MODE_PORT_MODE_MII;
4138 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 SPEED_100 : SPEED_10;
4142 if (tg3_5700_link_polarity(tp, speed))
4143 mac_mode |= MAC_MODE_LINK_POLARITY;
4145 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4148 mac_mode = MAC_MODE_PORT_MODE_TBI;
4151 if (!tg3_flag(tp, 5750_PLUS))
4152 tw32(MAC_LED_CTRL, tp->led_ctrl);
4154 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4159 if (tg3_flag(tp, ENABLE_APE))
4160 mac_mode |= MAC_MODE_APE_TX_EN |
4161 MAC_MODE_APE_RX_EN |
4162 MAC_MODE_TDE_ENABLE;
4164 tw32_f(MAC_MODE, mac_mode);
4167 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Per-ASIC clock gating / PLL power-down for the sleep state. */
4171 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4176 base_val = tp->pci_clock_ctrl;
4177 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 CLOCK_CTRL_TXCLK_DISABLE);
4180 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 } else if (tg3_flag(tp, 5780_CLASS) ||
4183 tg3_flag(tp, CPMU_PRESENT) ||
4184 tg3_asic_rev(tp) == ASIC_REV_5906) {
4186 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 u32 newbits1, newbits2;
4189 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE |
4194 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 } else if (tg3_flag(tp, 5705_PLUS)) {
4196 newbits1 = CLOCK_CTRL_625_CORE;
4197 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4199 newbits1 = CLOCK_CTRL_ALTCLK;
4200 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4206 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4209 if (!tg3_flag(tp, 5705_PLUS)) {
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4216 CLOCK_CTRL_44MHZ_CORE);
4218 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4221 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 tp->pci_clock_ctrl | newbits3, 40);
4226 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 tg3_power_down_phy(tp, do_low_power);
4229 tg3_frob_aux_power(tp, true);
4231 /* Workaround for unstable PLL clock */
4232 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 u32 val = tr32(0x7d00);
4237 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4239 if (!tg3_flag(tp, ENABLE_ASF)) {
4242 err = tg3_nvram_lock(tp);
4243 tg3_halt_cpu(tp, RX_CPU_BASE);
4245 tg3_nvram_unlock(tp);
/* Tell firmware (and APE, if present) we are shutting down. */
4249 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4251 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
/*
 * tg3_power_down - arm D3 wake if WoL is enabled, then put the device
 * into PCI D3hot.  Callers are expected to have run
 * tg3_power_down_prepare() first — TODO confirm against callers.
 */
4256 static void tg3_power_down(struct tg3 *tp)
4258 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 pci_set_power_state(tp->pdev, PCI_D3hot);
/*
 * tg3_aux_stat_to_speed_duplex - decode the MII_TG3_AUX_STAT speed/
 * duplex field into *speed / *duplex.  FET-class PHYs use dedicated
 * 100/FULL status bits instead; anything unrecognized yields
 * SPEED_UNKNOWN / DUPLEX_UNKNOWN.  (Listing elided: SPEED_10/SPEED_100
 * assignments and break statements are among the missing lines.)
 */
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4264 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 case MII_TG3_AUX_STAT_10HALF:
4267 *duplex = DUPLEX_HALF;
4270 case MII_TG3_AUX_STAT_10FULL:
4272 *duplex = DUPLEX_FULL;
4275 case MII_TG3_AUX_STAT_100HALF:
4277 *duplex = DUPLEX_HALF;
4280 case MII_TG3_AUX_STAT_100FULL:
4282 *duplex = DUPLEX_FULL;
4285 case MII_TG3_AUX_STAT_1000HALF:
4286 *speed = SPEED_1000;
4287 *duplex = DUPLEX_HALF;
4290 case MII_TG3_AUX_STAT_1000FULL:
4291 *speed = SPEED_1000;
4292 *duplex = DUPLEX_FULL;
4296 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4299 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4303 *speed = SPEED_UNKNOWN;
4304 *duplex = DUPLEX_UNKNOWN;
/*
 * tg3_phy_autoneg_cfg - program MII advertisement registers from
 * ethtool-style @advertise and @flowctrl: MII_ADVERTISE, MII_CTRL1000
 * (unless the PHY is 10/100-only), and the EEE advertisement via
 * clause-45 MDIO_AN_EEE_ADV when the PHY is EEE-capable.  Returns an
 * MII access error code; 0 on success (per the tg3_writephy results).
 */
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4314 new_adv = ADVERTISE_CSMA;
4315 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 new_adv |= mii_advertise_flowctrl(flowctrl);
4318 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4322 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
/* 5701 A0/B0 require forced-master for 1000T operation. */
4325 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4329 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4334 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4337 tw32(TG3_CPMU_EEE_MODE,
4338 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4340 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4345 /* Advertise 100-BaseTX EEE ability */
4346 if (advertise & ADVERTISED_100baseT_Full)
4347 val |= MDIO_AN_EEE_ADV_100TX;
4348 /* Advertise 1000-BaseT EEE ability */
4349 if (advertise & ADVERTISED_1000baseT_Full)
4350 val |= MDIO_AN_EEE_ADV_1000T;
4352 if (!tp->eee.eee_enabled) {
4354 tp->eee.advertised = 0;
4356 tp->eee.advertised = advertise &
4357 (ADVERTISED_100baseT_Full |
4358 ADVERTISED_1000baseT_Full);
4361 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
/* Chip-specific DSP fixups after EEE advertisement. */
4365 switch (tg3_asic_rev(tp)) {
4367 case ASIC_REV_57765:
4368 case ASIC_REV_57766:
4370 /* If we advertised any eee advertisements above... */
4372 val = MII_TG3_DSP_TAP26_ALNOKO |
4373 MII_TG3_DSP_TAP26_RMRXSTO |
4374 MII_TG3_DSP_TAP26_OPCSINPT;
4375 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 MII_TG3_DSP_CH34TP2_HIBW01);
4384 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/*
 * tg3_phy_copper_begin - start copper-PHY link bring-up.  With autoneg
 * (or in low-power state) it builds the advertisement mask — possibly
 * restricted to WoL speeds — and calls tg3_phy_autoneg_cfg(); with a
 * forced speed/duplex it writes BMCR directly, looping through a
 * loopback trick until link drops before applying the new BMCR.
 */
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4395 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
/* Low-power: advertise only the speeds usable for WoL. */
4399 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 adv = ADVERTISED_10baseT_Half |
4402 ADVERTISED_10baseT_Full;
4403 if (tg3_flag(tp, WOL_SPEED_100MB))
4404 adv |= ADVERTISED_100baseT_Half |
4405 ADVERTISED_100baseT_Full;
4406 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4407 if (!(tp->phy_flags &
4408 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4409 adv |= ADVERTISED_1000baseT_Half;
4410 adv |= ADVERTISED_1000baseT_Full;
4413 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4415 adv = tp->link_config.advertising;
4416 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4417 adv &= ~(ADVERTISED_1000baseT_Half |
4418 ADVERTISED_1000baseT_Full);
4420 fc = tp->link_config.flowctrl;
4423 tg3_phy_autoneg_cfg(tp, adv, fc);
4425 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4426 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4427 /* Normally during power down we want to autonegotiate
4428 * the lowest possible speed for WOL. However, to avoid
4429 * link flap, we leave it untouched.
4434 tg3_writephy(tp, MII_BMCR,
4435 BMCR_ANENABLE | BMCR_ANRESTART);
4438 u32 bmcr, orig_bmcr;
4440 tp->link_config.active_speed = tp->link_config.speed;
4441 tp->link_config.active_duplex = tp->link_config.duplex;
4443 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4444 /* With autoneg disabled, 5715 only links up when the
4445 * advertisement register has the configured speed
4448 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 switch (tp->link_config.speed) {
4458 bmcr |= BMCR_SPEED100;
4462 bmcr |= BMCR_SPEED1000;
4466 if (tp->link_config.duplex == DUPLEX_FULL)
4467 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR when it changed; wait for link to drop first. */
4469 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4470 (bmcr != orig_bmcr)) {
4471 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4472 for (i = 0; i < 1500; i++) {
4476 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4477 tg3_readphy(tp, MII_BMSR, &tmp))
4479 if (!(tmp & BMSR_LSTATUS)) {
4484 tg3_writephy(tp, MII_BMCR, bmcr);
/*
 * tg3_phy_pull_config - reverse-engineer tp->link_config from the PHY's
 * current MII registers (used when the driver attaches to an already
 * configured PHY).  Fills autoneg, speed, duplex, flowctrl and the
 * advertising mask; serdes vs copper paths read different registers.
 */
4490 static int tg3_phy_pull_config(struct tg3 *tp)
4495 err = tg3_readphy(tp, MII_BMCR, &val);
/* Forced mode: decode speed/duplex straight from BMCR. */
4499 if (!(val & BMCR_ANENABLE)) {
4500 tp->link_config.autoneg = AUTONEG_DISABLE;
4501 tp->link_config.advertising = 0;
4502 tg3_flag_clear(tp, PAUSE_AUTONEG);
4506 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4508 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511 tp->link_config.speed = SPEED_10;
4514 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 tp->link_config.speed = SPEED_100;
4519 case BMCR_SPEED1000:
4520 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4521 tp->link_config.speed = SPEED_1000;
4529 if (val & BMCR_FULLDPLX)
4530 tp->link_config.duplex = DUPLEX_FULL;
4532 tp->link_config.duplex = DUPLEX_HALF;
4534 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
/* Autoneg mode: reconstruct the advertised mode set. */
4540 tp->link_config.autoneg = AUTONEG_ENABLE;
4541 tp->link_config.advertising = ADVERTISED_Autoneg;
4542 tg3_flag_set(tp, PAUSE_AUTONEG);
4544 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4552 tp->link_config.advertising |= adv | ADVERTISED_TP;
4554 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4556 tp->link_config.advertising |= ADVERTISED_FIBRE;
4559 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4569 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 adv = tg3_decode_flowctrl_1000X(val);
4574 tp->link_config.flowctrl = adv;
4576 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4577 adv = mii_adv_to_ethtool_adv_x(val);
4580 tp->link_config.advertising |= adv;
/*
 * tg3_init_5401phy_dsp - BCM5401 DSP workaround sequence: disable tap
 * power management, set the extended packet length bit, and write a
 * fixed series of vendor DSP registers.  Returns the OR of the write
 * results (non-zero on any failure).
 */
4587 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 /* Turn off tap power management. */
4592 /* Set Extended packet length bit */
4593 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4595 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4596 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4597 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4598 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4599 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/*
 * tg3_phy_eee_config_ok - compare the EEE settings currently in the PHY
 * (pulled via tg3_eee_pull_config) with the driver's desired tp->eee.
 * A mismatch means a PHY reset is needed for the new EEE config to
 * take effect (see the caller in tg3_setup_copper_phy).
 */
4606 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4608 struct ethtool_eee eee;
4610 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613 tg3_eee_pull_config(tp, &eee);
4615 if (tp->eee.eee_enabled) {
4616 if (tp->eee.advertised != eee.advertised ||
4617 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4618 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621 /* EEE is disabled but we're advertising */
/*
 * tg3_phy_copper_an_config_ok - check that the PHY's advertisement
 * registers (MII_ADVERTISE, and MII_CTRL1000 for gigabit-capable PHYs)
 * still match tp->link_config; *lcladv receives the raw MII_ADVERTISE
 * value for the caller's flow-control resolution.
 */
4629 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4631 u32 advmsk, tgtadv, advertising;
4633 advertising = tp->link_config.advertising;
4634 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4636 advmsk = ADVERTISE_ALL;
4637 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4638 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4639 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645 if ((*lcladv & advmsk) != tgtadv)
4648 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4653 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
/* 5701 A0/B0 run forced-master; include those bits in the compare. */
4657 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4658 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4659 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4660 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4661 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4663 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666 if (tg3_ctrl != tgtadv)
/*
 * tg3_phy_copper_fetch_rmtadv - read the link partner's advertisement:
 * MII_STAT1000 (gigabit-capable PHYs) plus MII_LPA, converted to an
 * ethtool mask stored in tp->link_config.rmt_adv.  *rmtadv keeps the
 * raw MII_LPA value for flow-control resolution by the caller.
 */
4673 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680 if (tg3_readphy(tp, MII_STAT1000, &val))
4683 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686 if (tg3_readphy(tp, MII_LPA, rmtadv))
4689 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4690 tp->link_config.rmt_adv = lpeth;
/*
 * tg3_test_and_report_link_chg - if link state changed versus
 * tp->link_up, update the carrier, clear parallel-detect state on
 * MII-serdes parts, and log the change via tg3_link_report().
 */
4695 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4697 if (curr_link_up != tp->link_up) {
4699 netif_carrier_on(tp->dev);
4701 netif_carrier_off(tp->dev);
4702 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4703 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706 tg3_link_report(tp);
/*
 * tg3_clear_mac_status - acknowledge (write-1-to-clear, presumably —
 * the tw32 target line is elided here) the sync/config-changed,
 * MI-completion and link-state-changed bits in MAC_STATUS.
 */
4713 static void tg3_clear_mac_status(struct tg3 *tp)
4718 MAC_STATUS_SYNC_CHANGED |
4719 MAC_STATUS_CFG_CHANGED |
4720 MAC_STATUS_MI_COMPLETION |
4721 MAC_STATUS_LNKSTATE_CHANGED);
/*
 * tg3_setup_eee - program the CPMU Energy Efficient Ethernet blocks:
 * link-idle detection, exit latency, the EEE mode word (gated on
 * tp->eee.eee_enabled) and the two debounce timers.
 */
4725 static void tg3_setup_eee(struct tg3 *tp)
4729 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4730 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4731 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4732 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4734 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4736 tw32_f(TG3_CPMU_EEE_CTRL,
4737 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
/* Build the EEE mode word; only applied when EEE is enabled. */
4739 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4740 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4741 TG3_CPMU_EEEMD_LPI_IN_RX |
4742 TG3_CPMU_EEEMD_EEE_ENABLE;
4744 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4745 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4747 if (tg3_flag(tp, ENABLE_APE))
4748 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4750 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4752 tw32_f(TG3_CPMU_EEE_DBTMR1,
4753 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4754 (tp->eee.tx_lpi_timer & 0xffff));
4756 tw32_f(TG3_CPMU_EEE_DBTMR2,
4757 TG3_CPMU_DBTMR2_APE_TX_2047US |
4758 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
/*
 * tg3_setup_copper_phy - full copper link bring-up/check path: clears
 * MAC status, applies per-chip PHY workarounds (BCM5401 DSP, 5701 CRC
 * bug), polls BMSR/AUX_STAT for link, validates the autoneg config
 * (including EEE, which may force a PHY reset), resolves flow control,
 * programs MAC_MODE/LED/clock registers to match the result, and
 * finally reports any link change.
 * NOTE(review): listing is elided — the sequencing of register reads
 * and udelay()s (missing lines) is deliberate; kept byte-identical.
 */
4761 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4763 bool current_link_up;
4765 u32 lcl_adv, rmt_adv;
4770 tg3_clear_mac_status(tp);
4772 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4774 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4780 /* Some third-party PHYs need to be reset on link going
4783 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4784 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4785 tg3_asic_rev(tp) == ASIC_REV_5705) &&
/* BMSR is read twice back-to-back throughout this function: the
 * latched link-status bit needs a double read to be current. */
4787 tg3_readphy(tp, MII_BMSR, &bmsr);
4788 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4789 !(bmsr & BMSR_LSTATUS))
4795 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4796 tg3_readphy(tp, MII_BMSR, &bmsr);
4797 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4798 !tg3_flag(tp, INIT_COMPLETE))
4801 if (!(bmsr & BMSR_LSTATUS)) {
4802 err = tg3_init_5401phy_dsp(tp);
4806 tg3_readphy(tp, MII_BMSR, &bmsr);
4807 for (i = 0; i < 1000; i++) {
4809 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4810 (bmsr & BMSR_LSTATUS)) {
4816 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4817 TG3_PHY_REV_BCM5401_B0 &&
4818 !(bmsr & BMSR_LSTATUS) &&
4819 tp->link_config.active_speed == SPEED_1000) {
4820 err = tg3_phy_reset(tp);
4822 err = tg3_init_5401phy_dsp(tp);
4827 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4828 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4829 /* 5701 {A0,B0} CRC bug workaround */
4830 tg3_writephy(tp, 0x15, 0x0a75);
4831 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4833 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836 /* Clear pending interrupts... */
4837 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4841 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4842 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4843 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4845 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4846 tg3_asic_rev(tp) == ASIC_REV_5701) {
4847 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4848 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4849 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4851 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4854 current_link_up = false;
4855 current_speed = SPEED_UNKNOWN;
4856 current_duplex = DUPLEX_UNKNOWN;
4857 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4858 tp->link_config.rmt_adv = 0;
4860 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4861 err = tg3_phy_auxctl_read(tp,
4862 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4864 if (!err && !(val & (1 << 10))) {
4865 tg3_phy_auxctl_write(tp,
4866 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll for link, then decode speed/duplex from AUX_STAT. */
4873 for (i = 0; i < 100; i++) {
4874 tg3_readphy(tp, MII_BMSR, &bmsr);
4875 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4876 (bmsr & BMSR_LSTATUS))
4881 if (bmsr & BMSR_LSTATUS) {
4884 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4885 for (i = 0; i < 2000; i++) {
4887 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4892 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4897 for (i = 0; i < 200; i++) {
4898 tg3_readphy(tp, MII_BMCR, &bmcr);
4899 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4901 if (bmcr && bmcr != 0x7fff)
4909 tp->link_config.active_speed = current_speed;
4910 tp->link_config.active_duplex = current_duplex;
4912 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4913 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4915 if ((bmcr & BMCR_ANENABLE) &&
4917 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4918 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4919 current_link_up = true;
4921 /* EEE settings changes take effect only after a phy
4922 * reset. If we have skipped a reset due to Link Flap
4923 * Avoidance being enabled, do it now.
4925 if (!eee_config_ok &&
4926 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4932 if (!(bmcr & BMCR_ANENABLE) &&
4933 tp->link_config.speed == current_speed &&
4934 tp->link_config.duplex == current_duplex) {
4935 current_link_up = true;
4939 if (current_link_up &&
4940 tp->link_config.active_duplex == DUPLEX_FULL) {
4943 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4944 reg = MII_TG3_FET_GEN_STAT;
4945 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4947 reg = MII_TG3_EXT_STAT;
4948 bit = MII_TG3_EXT_STAT_MDIX;
4951 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4952 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4954 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4959 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4960 tg3_phy_copper_begin(tp);
4962 if (tg3_flag(tp, ROBOSWITCH)) {
4963 current_link_up = true;
4964 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4965 current_speed = SPEED_1000;
4966 current_duplex = DUPLEX_FULL;
4967 tp->link_config.active_speed = current_speed;
4968 tp->link_config.active_duplex = current_duplex;
4971 tg3_readphy(tp, MII_BMSR, &bmsr);
4972 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4973 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4974 current_link_up = true;
/* Program MAC_MODE port mode / duplex / polarity from the result. */
4977 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4978 if (current_link_up) {
4979 if (tp->link_config.active_speed == SPEED_100 ||
4980 tp->link_config.active_speed == SPEED_10)
4981 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4983 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4985 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989 /* In order for the 5750 core in BCM4785 chip to work properly
4990 * in RGMII mode, the Led Control Register must be set up.
4992 if (tg3_flag(tp, RGMII_MODE)) {
4993 u32 led_ctrl = tr32(MAC_LED_CTRL);
4994 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4996 if (tp->link_config.active_speed == SPEED_10)
4997 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4998 else if (tp->link_config.active_speed == SPEED_100)
4999 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 LED_CTRL_100MBPS_ON);
5001 else if (tp->link_config.active_speed == SPEED_1000)
5002 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 LED_CTRL_1000MBPS_ON);
5005 tw32(MAC_LED_CTRL, led_ctrl);
5009 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5013 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5014 if (current_link_up &&
5015 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5016 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5018 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021 /* ??? Without this setting Netgear GA302T PHY does not
5022 * ??? send/receive packets...
5024 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5025 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5026 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5027 tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 tw32_f(MAC_MODE, tp->mac_mode);
5034 tg3_phy_eee_adjust(tp, current_link_up);
5036 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5037 /* Polled via timer. */
5038 tw32_f(MAC_EVENT, 0);
5040 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5044 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5046 tp->link_config.active_speed == SPEED_1000 &&
5047 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050 (MAC_STATUS_SYNC_CHANGED |
5051 MAC_STATUS_CFG_CHANGED));
5054 NIC_SRAM_FIRMWARE_MBOX,
5055 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058 /* Prevent send BD corruption. */
5059 if (tg3_flag(tp, CLKREQ_BUG)) {
5060 if (tp->link_config.active_speed == SPEED_100 ||
5061 tp->link_config.active_speed == SPEED_10)
5062 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5063 PCI_EXP_LNKCTL_CLKREQ_EN);
5065 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5066 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 tg3_test_and_report_link_chg(tp, current_link_up);
/*
 * struct tg3_fiber_aneginfo - software state for the 1000BASE-X
 * auto-negotiation state machine (tg3_fiber_aneg_smachine below).
 * ANEG_STATE_* name the machine's states; MR_* are the "management
 * register" status/control flag bits modeled after IEEE 802.3 Clause
 * 37; ANEG_CFG_* decode the received /C/ ordered-set config word.
 */
5074 struct tg3_fiber_aneginfo {
5076 #define ANEG_STATE_UNKNOWN 0
5077 #define ANEG_STATE_AN_ENABLE 1
5078 #define ANEG_STATE_RESTART_INIT 2
5079 #define ANEG_STATE_RESTART 3
5080 #define ANEG_STATE_DISABLE_LINK_OK 4
5081 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5082 #define ANEG_STATE_ABILITY_DETECT 6
5083 #define ANEG_STATE_ACK_DETECT_INIT 7
5084 #define ANEG_STATE_ACK_DETECT 8
5085 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5086 #define ANEG_STATE_COMPLETE_ACK 10
5087 #define ANEG_STATE_IDLE_DETECT_INIT 11
5088 #define ANEG_STATE_IDLE_DETECT 12
5089 #define ANEG_STATE_LINK_OK 13
5090 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5091 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5094 #define MR_AN_ENABLE 0x00000001
5095 #define MR_RESTART_AN 0x00000002
5096 #define MR_AN_COMPLETE 0x00000004
5097 #define MR_PAGE_RX 0x00000008
5098 #define MR_NP_LOADED 0x00000010
5099 #define MR_TOGGLE_TX 0x00000020
5100 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5101 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5102 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5103 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5104 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5105 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5106 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5107 #define MR_TOGGLE_RX 0x00002000
5108 #define MR_NP_RX 0x00004000
5110 #define MR_LINK_OK 0x80000000
/* link_time/cur_time are in state-machine ticks, compared against
 * ANEG_STATE_SETTLE_TIME below. */
5112 unsigned long link_time, cur_time;
5114 u32 ability_match_cfg;
5115 int ability_match_count;
5117 char ability_match, idle_match, ack_match;
5119 u32 txconfig, rxconfig;
5120 #define ANEG_CFG_NP 0x00000080
5121 #define ANEG_CFG_ACK 0x00000040
5122 #define ANEG_CFG_RF2 0x00000020
5123 #define ANEG_CFG_RF1 0x00000010
5124 #define ANEG_CFG_PS2 0x00000001
5125 #define ANEG_CFG_PS1 0x00008000
5126 #define ANEG_CFG_HD 0x00004000
5127 #define ANEG_CFG_FD 0x00002000
5128 #define ANEG_CFG_INVAL 0x00001f06
/* State-machine return codes and settle timeout (in ticks). */
5133 #define ANEG_TIMER_ENAB 2
5134 #define ANEG_FAILED -1
5136 #define ANEG_STATE_SETTLE_TIME 10000
/*
 * tg3_fiber_aneg_smachine - one step of the software 1000BASE-X
 * auto-negotiation state machine (IEEE 802.3 Clause 37 style).  Reads
 * the received config word from MAC_RX_AUTO_NEG, tracks ability/ack
 * matching, drives MAC_TX_AUTO_NEG and MAC_MODE_SEND_CONFIGS through
 * the ability-detect / ack-detect / complete-ack / idle-detect phases,
 * and returns a status (ANEG_TIMER_ENAB to keep ticking, ANEG_DONE /
 * ANEG_FAILED to stop — some returns are in elided lines).
 */
5138 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5139 struct tg3_fiber_aneginfo *ap)
5142 unsigned long delta;
5146 if (ap->state == ANEG_STATE_UNKNOWN) {
5150 ap->ability_match_cfg = 0;
5151 ap->ability_match_count = 0;
5152 ap->ability_match = 0;
/* Sample the receive config word; ability_match latches after the
 * same non-zero config word is seen more than once. */
5158 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5159 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5161 if (rx_cfg_reg != ap->ability_match_cfg) {
5162 ap->ability_match_cfg = rx_cfg_reg;
5163 ap->ability_match = 0;
5164 ap->ability_match_count = 0;
5166 if (++ap->ability_match_count > 1) {
5167 ap->ability_match = 1;
5168 ap->ability_match_cfg = rx_cfg_reg;
5171 if (rx_cfg_reg & ANEG_CFG_ACK)
5179 ap->ability_match_cfg = 0;
5180 ap->ability_match_count = 0;
5181 ap->ability_match = 0;
5187 ap->rxconfig = rx_cfg_reg;
5190 switch (ap->state) {
5191 case ANEG_STATE_UNKNOWN:
5192 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5193 ap->state = ANEG_STATE_AN_ENABLE;
5196 case ANEG_STATE_AN_ENABLE:
5197 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5198 if (ap->flags & MR_AN_ENABLE) {
5201 ap->ability_match_cfg = 0;
5202 ap->ability_match_count = 0;
5203 ap->ability_match = 0;
5207 ap->state = ANEG_STATE_RESTART_INIT;
5209 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 case ANEG_STATE_RESTART_INIT:
5214 ap->link_time = ap->cur_time;
5215 ap->flags &= ~(MR_NP_LOADED);
5217 tw32(MAC_TX_AUTO_NEG, 0);
5218 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5219 tw32_f(MAC_MODE, tp->mac_mode);
5222 ret = ANEG_TIMER_ENAB;
5223 ap->state = ANEG_STATE_RESTART;
5226 case ANEG_STATE_RESTART:
5227 delta = ap->cur_time - ap->link_time;
5228 if (delta > ANEG_STATE_SETTLE_TIME)
5229 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5231 ret = ANEG_TIMER_ENAB;
5234 case ANEG_STATE_DISABLE_LINK_OK:
5238 case ANEG_STATE_ABILITY_DETECT_INIT:
/* Build our tx config word: full duplex + pause bits from the
 * configured flow control. */
5239 ap->flags &= ~(MR_TOGGLE_TX);
5240 ap->txconfig = ANEG_CFG_FD;
5241 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5242 if (flowctrl & ADVERTISE_1000XPAUSE)
5243 ap->txconfig |= ANEG_CFG_PS1;
5244 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5245 ap->txconfig |= ANEG_CFG_PS2;
5246 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5247 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5248 tw32_f(MAC_MODE, tp->mac_mode);
5251 ap->state = ANEG_STATE_ABILITY_DETECT;
5254 case ANEG_STATE_ABILITY_DETECT:
5255 if (ap->ability_match != 0 && ap->rxconfig != 0)
5256 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259 case ANEG_STATE_ACK_DETECT_INIT:
5260 ap->txconfig |= ANEG_CFG_ACK;
5261 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5262 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5263 tw32_f(MAC_MODE, tp->mac_mode);
5266 ap->state = ANEG_STATE_ACK_DETECT;
5269 case ANEG_STATE_ACK_DETECT:
5270 if (ap->ack_match != 0) {
5271 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5272 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5273 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5275 ap->state = ANEG_STATE_AN_ENABLE;
5277 } else if (ap->ability_match != 0 &&
5278 ap->rxconfig == 0) {
5279 ap->state = ANEG_STATE_AN_ENABLE;
5283 case ANEG_STATE_COMPLETE_ACK_INIT:
5284 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Translate the partner's config word into MR_LP_ADV_* flags. */
5288 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5289 MR_LP_ADV_HALF_DUPLEX |
5290 MR_LP_ADV_SYM_PAUSE |
5291 MR_LP_ADV_ASYM_PAUSE |
5292 MR_LP_ADV_REMOTE_FAULT1 |
5293 MR_LP_ADV_REMOTE_FAULT2 |
5294 MR_LP_ADV_NEXT_PAGE |
5297 if (ap->rxconfig & ANEG_CFG_FD)
5298 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5299 if (ap->rxconfig & ANEG_CFG_HD)
5300 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5301 if (ap->rxconfig & ANEG_CFG_PS1)
5302 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5303 if (ap->rxconfig & ANEG_CFG_PS2)
5304 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5305 if (ap->rxconfig & ANEG_CFG_RF1)
5306 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5307 if (ap->rxconfig & ANEG_CFG_RF2)
5308 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5309 if (ap->rxconfig & ANEG_CFG_NP)
5310 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5312 ap->link_time = ap->cur_time;
5314 ap->flags ^= (MR_TOGGLE_TX);
5315 if (ap->rxconfig & 0x0008)
5316 ap->flags |= MR_TOGGLE_RX;
5317 if (ap->rxconfig & ANEG_CFG_NP)
5318 ap->flags |= MR_NP_RX;
5319 ap->flags |= MR_PAGE_RX;
5321 ap->state = ANEG_STATE_COMPLETE_ACK;
5322 ret = ANEG_TIMER_ENAB;
5325 case ANEG_STATE_COMPLETE_ACK:
5326 if (ap->ability_match != 0 &&
5327 ap->rxconfig == 0) {
5328 ap->state = ANEG_STATE_AN_ENABLE;
5331 delta = ap->cur_time - ap->link_time;
5332 if (delta > ANEG_STATE_SETTLE_TIME) {
5333 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5334 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5336 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5337 !(ap->flags & MR_NP_RX)) {
5338 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346 case ANEG_STATE_IDLE_DETECT_INIT:
5347 ap->link_time = ap->cur_time;
5348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5349 tw32_f(MAC_MODE, tp->mac_mode);
5352 ap->state = ANEG_STATE_IDLE_DETECT;
5353 ret = ANEG_TIMER_ENAB;
5356 case ANEG_STATE_IDLE_DETECT:
5357 if (ap->ability_match != 0 &&
5358 ap->rxconfig == 0) {
5359 ap->state = ANEG_STATE_AN_ENABLE;
5362 delta = ap->cur_time - ap->link_time;
5363 if (delta > ANEG_STATE_SETTLE_TIME) {
5364 /* XXX another gem from the Broadcom driver :( */
5365 ap->state = ANEG_STATE_LINK_OK;
5369 case ANEG_STATE_LINK_OK:
5370 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5375 /* ??? unimplemented */
5378 case ANEG_STATE_NEXT_PAGE_WAIT:
5379 /* ??? unimplemented */
/*
 * fiber_autoneg - run the software fiber autoneg state machine to
 * completion (bounded at 195000 ticks), then stop sending configs.
 * *txflags gets our transmitted config word, *rxflags the MR_* result
 * flags; returns nonzero on successful negotiation (full-duplex link).
 */
5390 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5393 struct tg3_fiber_aneginfo aninfo;
5394 int status = ANEG_FAILED;
5398 tw32_f(MAC_TX_AUTO_NEG, 0);
5400 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5401 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407 memset(&aninfo, 0, sizeof(aninfo));
5408 aninfo.flags |= MR_AN_ENABLE;
5409 aninfo.state = ANEG_STATE_UNKNOWN;
5410 aninfo.cur_time = 0;
5412 while (++tick < 195000) {
5413 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5414 if (status == ANEG_DONE || status == ANEG_FAILED)
5420 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5421 tw32_f(MAC_MODE, tp->mac_mode);
5424 *txflags = aninfo.txconfig;
5425 *rxflags = aninfo.flags;
5427 if (status == ANEG_DONE &&
5428 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5429 MR_LP_ADV_FULL_DUPLEX)))
/*
 * tg3_init_bcm8002 - initialization sequence for the BCM8002 SerDes
 * PHY: optional reset, PLL lock range, config-mode register writes,
 * POR assert/deassert, and channel deselect so the PHY ID can be read.
 * Register numbers/values are vendor-magic; delays between writes are
 * in elided lines.
 */
5435 static void tg3_init_bcm8002(struct tg3 *tp)
5437 u32 mac_status = tr32(MAC_STATUS);
5440 /* Reset when initting first time or we have a link. */
5441 if (tg3_flag(tp, INIT_COMPLETE) &&
5442 !(mac_status & MAC_STATUS_PCS_SYNCED))
5445 /* Set PLL lock range. */
5446 tg3_writephy(tp, 0x16, 0x8007);
5449 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5451 /* Wait for reset to complete. */
5452 /* XXX schedule_timeout() ... */
5453 for (i = 0; i < 500; i++)
5456 /* Config mode; select PMA/Ch 1 regs. */
5457 tg3_writephy(tp, 0x10, 0x8411);
5459 /* Enable auto-lock and comdet, select txclk for tx. */
5460 tg3_writephy(tp, 0x11, 0x0a10);
5462 tg3_writephy(tp, 0x18, 0x00a0);
5463 tg3_writephy(tp, 0x16, 0x41ff);
5465 /* Assert and deassert POR. */
5466 tg3_writephy(tp, 0x13, 0x0400);
5468 tg3_writephy(tp, 0x13, 0x0000);
5470 tg3_writephy(tp, 0x11, 0x0a50);
5472 tg3_writephy(tp, 0x11, 0x0a10);
5474 /* Wait for signal to stabilize */
5475 /* XXX schedule_timeout() ... */
5476 for (i = 0; i < 15000; i++)
5479 /* Deselect the channel register so we can read the PHYID
5482 tg3_writephy(tp, 0x10, 0x8011);
/* tg3_setup_fiber_hw_autoneg - fiber link setup using the on-chip
 * SG_DIG hardware autonegotiation engine.
 * @tp:         device instance
 * @mac_status: snapshot of MAC_STATUS taken by the caller
 *
 * Programs SG_DIG_CTRL to match the requested autoneg/flow-control
 * configuration, then interprets SG_DIG_STATUS/MAC_STATUS to decide
 * whether the link is up.  Also implements parallel-detect fallback
 * (link up with PCS sync but no config code words) and the
 * serdes_counter-based autoneg timeout.
 *
 * Returns true when the link is considered up.
 * NOTE(review): this excerpt has elided lines (e.g. the workaround/port_a
 * setup and several delay/brace lines); code below is kept verbatim.
 */
5485 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 bool current_link_up;
5489 u32 sg_dig_ctrl, sg_dig_status;
5490 u32 serdes_cfg, expected_sg_dig_ctrl;
5491 int workaround, port_a;
5496 current_link_up = false;
/* The MAC_SERDES_CFG workaround applies only to 5704 A0/A1 silicon. */
5498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5499 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5501 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5505 /* preserve bits 20-23 for voltage regulator */
5506 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced-mode path: turn hardware autoneg off if it was on. */
5511 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5512 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5514 u32 val = serdes_cfg;
5520 tw32_f(MAC_SERDES_CFG, val);
5523 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5525 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5526 tg3_setup_flow_control(tp, 0, 0);
5527 current_link_up = true;
5532 /* Want auto-negotiation. */
5533 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
/* Translate the requested flow control into SG_DIG pause bits. */
5535 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5536 if (flowctrl & ADVERTISE_1000XPAUSE)
5537 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5538 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5539 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5541 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
/* Keep a parallel-detected link alive while the timeout runs. */
5542 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5543 tp->serdes_counter &&
5544 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5545 MAC_STATUS_RCVD_CFG)) ==
5546 MAC_STATUS_PCS_SYNCED)) {
5547 tp->serdes_counter--;
5548 current_link_up = true;
/* (Re)start hardware autoneg with a soft reset of the SG_DIG block. */
5553 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5554 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5556 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5558 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5560 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5561 MAC_STATUS_SIGNAL_DET)) {
5562 sg_dig_status = tr32(SG_DIG_STATUS);
5563 mac_status = tr32(MAC_STATUS);
/* Autoneg finished: extract local/partner pause advertisement. */
5565 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5566 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5567 u32 local_adv = 0, remote_adv = 0;
5569 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5570 local_adv |= ADVERTISE_1000XPAUSE;
5571 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5572 local_adv |= ADVERTISE_1000XPSE_ASYM;
5574 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5575 remote_adv |= LPA_1000XPAUSE;
5576 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5577 remote_adv |= LPA_1000XPAUSE_ASYM;
5579 tp->link_config.rmt_adv =
5580 mii_adv_to_ethtool_adv_x(remote_adv);
5582 tg3_setup_flow_control(tp, local_adv, remote_adv);
5583 current_link_up = true;
5584 tp->serdes_counter = 0;
5585 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5586 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5587 if (tp->serdes_counter)
5588 tp->serdes_counter--;
5591 u32 val = serdes_cfg;
5598 tw32_f(MAC_SERDES_CFG, val);
5601 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 /* Link parallel detection - link is up */
5605 /* only if we have PCS_SYNC and not */
5606 /* receiving config code words */
5607 mac_status = tr32(MAC_STATUS);
5608 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5609 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5610 tg3_setup_flow_control(tp, 0, 0);
5611 current_link_up = true;
5613 TG3_PHYFLG_PARALLEL_DETECT;
5614 tp->serdes_counter =
5615 SERDES_PARALLEL_DET_TIMEOUT;
5617 goto restart_autoneg;
5621 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5622 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 return current_link_up;
/* tg3_setup_fiber_by_hand - fiber link setup via software autoneg.
 * @tp:         device instance
 * @mac_status: snapshot of MAC_STATUS taken by the caller
 *
 * Used when the chip's hardware autoneg engine is not in use.  When
 * autoneg is enabled it runs fiber_autoneg() and derives flow control
 * from the exchanged config words; with PCS sync but no config words
 * it accepts the link via parallel detection.  When autoneg is off it
 * simply forces 1000/FD up.  Returns true when the link is up.
 */
5629 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5631 bool current_link_up = false;
/* No PCS sync - nothing to negotiate with. */
5633 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5637 u32 txflags, rxflags;
5640 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5641 u32 local_adv = 0, remote_adv = 0;
/* Map our transmitted pause bits and the partner's received
 * pause bits onto the MII advertisement encoding.
 */
5643 if (txflags & ANEG_CFG_PS1)
5644 local_adv |= ADVERTISE_1000XPAUSE;
5645 if (txflags & ANEG_CFG_PS2)
5646 local_adv |= ADVERTISE_1000XPSE_ASYM;
5648 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5649 remote_adv |= LPA_1000XPAUSE;
5650 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5651 remote_adv |= LPA_1000XPAUSE_ASYM;
5653 tp->link_config.rmt_adv =
5654 mii_adv_to_ethtool_adv_x(remote_adv);
5656 tg3_setup_flow_control(tp, local_adv, remote_adv);
5658 current_link_up = true;
/* Ack SYNC/CFG change latches until they stay clear. */
5660 for (i = 0; i < 30; i++) {
5663 (MAC_STATUS_SYNC_CHANGED |
5664 MAC_STATUS_CFG_CHANGED));
5666 if ((tr32(MAC_STATUS) &
5667 (MAC_STATUS_SYNC_CHANGED |
5668 MAC_STATUS_CFG_CHANGED)) == 0)
/* Parallel detection: PCS sync without config code words. */
5672 mac_status = tr32(MAC_STATUS);
5673 if (!current_link_up &&
5674 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5675 !(mac_status & MAC_STATUS_RCVD_CFG))
5676 current_link_up = true;
5678 tg3_setup_flow_control(tp, 0, 0);
5680 /* Forcing 1000FD link up. */
5681 current_link_up = true;
5683 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 tw32_f(MAC_MODE, tp->mac_mode);
5691 return current_link_up;
/* tg3_setup_fiber_phy - top-level link setup for TBI/fiber ports.
 * @tp:          device instance
 * @force_reset: caller requests a PHY reset (consumed by sub-paths)
 *
 * Saves the current speed/duplex/flow-control so a change can be
 * reported, programs the MAC for TBI port mode, runs either the
 * hardware (SG_DIG) or software autoneg path, acknowledges the
 * latched MAC status change bits, and finally updates link state,
 * LED control, and calls tg3_link_report() if anything changed.
 * NOTE(review): some interior lines (udelay, early-return, braces)
 * are elided in this view; code is kept verbatim.
 */
5694 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 u32 orig_active_speed;
5698 u8 orig_active_duplex;
5700 bool current_link_up;
/* Remember pre-setup state so we only report real changes. */
5703 orig_pause_cfg = tp->link_config.active_flowctrl;
5704 orig_active_speed = tp->link_config.active_speed;
5705 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: already initialized, link healthy - just ack latches. */
5707 if (!tg3_flag(tp, HW_AUTONEG) &&
5709 tg3_flag(tp, INIT_COMPLETE)) {
5710 mac_status = tr32(MAC_STATUS);
5711 mac_status &= (MAC_STATUS_PCS_SYNCED |
5712 MAC_STATUS_SIGNAL_DET |
5713 MAC_STATUS_CFG_CHANGED |
5714 MAC_STATUS_RCVD_CFG);
5715 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5716 MAC_STATUS_SIGNAL_DET)) {
5717 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5718 MAC_STATUS_CFG_CHANGED));
5723 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Switch the MAC into TBI (fiber) port mode. */
5725 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5726 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5727 tw32_f(MAC_MODE, tp->mac_mode);
5730 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5731 tg3_init_bcm8002(tp);
5733 /* Enable link change event even when serdes polling. */
5734 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 tp->link_config.rmt_adv = 0;
5738 mac_status = tr32(MAC_STATUS);
/* Hardware vs software autoneg is chosen by the HW_AUTONEG flag. */
5740 if (tg3_flag(tp, HW_AUTONEG))
5741 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5743 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the link-change bit in the status block we show the ISR. */
5745 tp->napi[0].hw_status->status =
5746 (SD_STATUS_UPDATED |
5747 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack the latched change bits until they stay clear. */
5749 for (i = 0; i < 100; i++) {
5750 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5751 MAC_STATUS_CFG_CHANGED));
5753 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5754 MAC_STATUS_CFG_CHANGED |
5755 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 mac_status = tr32(MAC_STATUS);
5760 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5761 current_link_up = false;
/* Autoneg timed out: pulse SEND_CONFIGS to provoke the partner. */
5762 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5763 tp->serdes_counter == 0) {
5764 tw32_f(MAC_MODE, (tp->mac_mode |
5765 MAC_MODE_SEND_CONFIGS));
5767 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber is always 1000/FD when up; drive the LEDs accordingly. */
5771 if (current_link_up) {
5772 tp->link_config.active_speed = SPEED_1000;
5773 tp->link_config.active_duplex = DUPLEX_FULL;
5774 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5775 LED_CTRL_LNKLED_OVERRIDE |
5776 LED_CTRL_1000MBPS_ON));
5778 tp->link_config.active_speed = SPEED_UNKNOWN;
5779 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5780 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5781 LED_CTRL_LNKLED_OVERRIDE |
5782 LED_CTRL_TRAFFIC_OVERRIDE));
/* If the carrier state did not flip, still report parameter changes. */
5785 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5786 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5787 if (orig_pause_cfg != now_pause_cfg ||
5788 orig_active_speed != tp->link_config.active_speed ||
5789 orig_active_duplex != tp->link_config.active_duplex)
5790 tg3_link_report(tp);
/* tg3_setup_fiber_mii_phy - link setup for serdes ports driven through
 * an MII-style register interface (5714/5780-class, and the 5719/5720
 * SGMII mode handled first).
 * @tp:          device instance
 * @force_reset: skip the "leave parallel-detect alone" shortcut
 *
 * For 5719/5720 in SGMII mode the speed/duplex come straight from the
 * SERDES_TG3_1000X_STATUS register.  Otherwise the function reads
 * BMSR/BMCR, (re)starts MII autonegotiation or forces the link, and
 * derives duplex/flow-control from the ADVERTISE/LPA registers.
 * NOTE(review): several interior lines (local declarations, udelay,
 * else-branches) are elided in this view; code is kept verbatim.
 */
5796 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 u32 current_speed = SPEED_UNKNOWN;
5801 u8 current_duplex = DUPLEX_UNKNOWN;
5802 bool current_link_up = false;
5803 u32 local_adv, remote_adv, sgsr;
/* 5719/5720 SGMII: hardware reports link/speed/duplex directly. */
5805 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5806 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5807 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5808 (sgsr & SERDES_TG3_SGMII_MODE)) {
5813 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5815 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5816 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5818 current_link_up = true;
5819 if (sgsr & SERDES_TG3_SPEED_1000) {
5820 current_speed = SPEED_1000;
5821 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5822 } else if (sgsr & SERDES_TG3_SPEED_100) {
5823 current_speed = SPEED_100;
5824 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5826 current_speed = SPEED_10;
5827 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5831 current_duplex = DUPLEX_FULL;
5833 current_duplex = DUPLEX_HALF;
5836 tw32_f(MAC_MODE, tp->mac_mode);
5839 tg3_clear_mac_status(tp);
5841 goto fiber_setup_done;
5844 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5845 tw32_f(MAC_MODE, tp->mac_mode);
5848 tg3_clear_mac_status(tp);
5853 tp->link_config.rmt_adv = 0;
/* BMSR link bit is latched-low: read twice to get current state. */
5855 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* 5714: trust the MAC's TX status for link, not the PHY's BMSR. */
5857 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5858 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5859 bmsr |= BMSR_LSTATUS;
5861 bmsr &= ~BMSR_LSTATUS;
5864 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5866 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5867 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5868 /* do nothing, just check for link up at the end */
5869 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Rebuild the 1000BASE-X advertisement from the requested config. */
5872 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5873 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5874 ADVERTISE_1000XPAUSE |
5875 ADVERTISE_1000XPSE_ASYM |
5878 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5879 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
/* Restart autoneg only when the advertisement actually changed. */
5881 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5882 tg3_writephy(tp, MII_ADVERTISE, newadv);
5883 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5884 tg3_writephy(tp, MII_BMCR, bmcr);
5886 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5887 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5888 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced-mode path: build the BMCR we want and compare. */
5895 bmcr &= ~BMCR_SPEED1000;
5896 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5898 if (tp->link_config.duplex == DUPLEX_FULL)
5899 new_bmcr |= BMCR_FULLDPLX;
5901 if (new_bmcr != bmcr) {
5902 /* BMCR_SPEED1000 is a reserved bit that needs
5903 * to be set on write.
5905 new_bmcr |= BMCR_SPEED1000;
5907 /* Force a linkdown */
5911 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5912 adv &= ~(ADVERTISE_1000XFULL |
5913 ADVERTISE_1000XHALF |
5915 tg3_writephy(tp, MII_ADVERTISE, adv);
5916 tg3_writephy(tp, MII_BMCR, bmcr |
5920 tg3_carrier_off(tp);
5922 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link state after the forced reconfiguration. */
5924 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5927 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5928 bmsr |= BMSR_LSTATUS;
5930 bmsr &= ~BMSR_LSTATUS;
5932 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 if (bmsr & BMSR_LSTATUS) {
5937 current_speed = SPEED_1000;
5938 current_link_up = true;
5939 if (bmcr & BMCR_FULLDPLX)
5940 current_duplex = DUPLEX_FULL;
5942 current_duplex = DUPLEX_HALF;
/* With autoneg on, duplex comes from the common advertisement. */
5947 if (bmcr & BMCR_ANENABLE) {
5950 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952 common = local_adv & remote_adv;
5953 if (common & (ADVERTISE_1000XHALF |
5954 ADVERTISE_1000XFULL)) {
5955 if (common & ADVERTISE_1000XFULL)
5956 current_duplex = DUPLEX_FULL;
5958 current_duplex = DUPLEX_HALF;
5960 tp->link_config.rmt_adv =
5961 mii_adv_to_ethtool_adv_x(remote_adv);
5962 } else if (!tg3_flag(tp, 5780_CLASS)) {
5963 /* Link is up via parallel detect */
5965 current_link_up = false;
5971 if (current_link_up && current_duplex == DUPLEX_FULL)
5972 tg3_setup_flow_control(tp, local_adv, remote_adv);
5974 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975 if (tp->link_config.active_duplex == DUPLEX_HALF)
5976 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5978 tw32_f(MAC_MODE, tp->mac_mode);
5981 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5983 tp->link_config.active_speed = current_speed;
5984 tp->link_config.active_duplex = current_duplex;
5986 tg3_test_and_report_link_chg(tp, current_link_up);
/* tg3_serdes_parallel_detect - periodic watchdog for MII-serdes links.
 *
 * While serdes_counter runs, autoneg is still settling - do nothing.
 * After it expires: if autoneg found no partner but we see signal
 * detect without config code words (PHY shadow/DSP registers), force
 * 1000/FD up via parallel detection.  Conversely, if a link that was
 * parallel-detected starts receiving config code words, re-enable
 * autonegotiation.
 * NOTE(review): some condition/brace lines are elided in this view.
 */
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5992 if (tp->serdes_counter) {
5993 /* Give autoneg time to complete. */
5994 tp->serdes_counter--;
5999 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 tg3_readphy(tp, MII_BMCR, &bmcr);
6003 if (bmcr & BMCR_ANENABLE) {
6006 /* Select shadow register 0x1f */
6007 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6010 /* Select expansion interrupt status register */
6011 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012 MII_TG3_DSP_EXP1_INT_STAT);
/* Read twice: first read clears/refreshes the latched status. */
6013 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017 /* We have signal detect and not receiving
6018 * config code words, link is up by parallel
/* Force 1000/FD and remember we came up via parallel detect. */
6022 bmcr &= ~BMCR_ANENABLE;
6023 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024 tg3_writephy(tp, MII_BMCR, bmcr);
6025 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028 } else if (tp->link_up &&
6029 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 /* Select expansion interrupt status register */
6034 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035 MII_TG3_DSP_EXP1_INT_STAT);
6036 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040 /* Config code words received, turn on autoneg. */
6041 tg3_readphy(tp, MII_BMCR, &bmcr);
6042 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6044 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* tg3_setup_phy - dispatch link setup to the right PHY handler and
 * apply post-setup MAC fixups.
 * @tp:          device instance
 * @force_reset: passed through to the chosen handler
 *
 * Chooses fiber / fiber-MII / copper setup by phy_flags, then:
 * 5784_AX: reprogram the GRC prescaler from the current MAC clock;
 * all chips: recompute MAC_TX_LENGTHS (larger slot time for 1000/HD),
 * set the stats coalescing ticks, and apply the ASPM L1-threshold
 * workaround when flagged.
 * NOTE(review): a few interior lines (scale assignments, link_up test)
 * are elided in this view; code is kept verbatim.
 */
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6055 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056 err = tg3_setup_fiber_phy(tp, force_reset)
6057 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6060 err = tg3_setup_copper_phy(tp, force_reset);
6062 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
/* Derive the GRC prescaler from the currently reported MAC clock. */
6065 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6068 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6073 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075 tw32(GRC_MISC_CFG, val);
/* Base TX lengths: IPG/CRS timing; 5720/5762 keep extra fields. */
6078 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079 (6 << TX_LENGTHS_IPG_SHIFT);
6080 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081 tg3_asic_rev(tp) == ASIC_REV_5762)
6082 val |= tr32(MAC_TX_LENGTHS) &
6083 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* 1000/half-duplex needs the longer (0xff) slot time. */
6086 if (tp->link_config.active_speed == SPEED_1000 &&
6087 tp->link_config.active_duplex == DUPLEX_HALF)
6088 tw32(MAC_TX_LENGTHS, val |
6089 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6091 tw32(MAC_TX_LENGTHS, val |
6092 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6094 if (!tg3_flag(tp, 5705_PLUS)) {
6096 tw32(HOSTCC_STAT_COAL_TICKS,
6097 tp->coal.stats_block_coalesce_usecs);
6099 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6103 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104 val = tr32(PCIE_PWR_MGMT_THRESH);
6106 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110 tw32(PCIE_PWR_MGMT_THRESH, val);
6116 /* tp->lock must be held */
/* Read the 64-bit EAV reference clock.  LSB is read first (bracketed
 * by ptp_read_system_prets/postts so the PHC framework can correlate
 * it with system time), then the MSB is merged in.
 */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6121 ptp_read_system_prets(sts);
6122 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123 ptp_read_system_postts(sts);
6124 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6129 /* tp->lock must be held */
/* Set the 64-bit EAV reference clock: stop the clock, load both
 * 32-bit halves, then resume.  The final write is flushed (tw32_f).
 */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6132 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6134 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
/* Forward declarations: the lock helpers are defined later in the file
 * but needed by the PTP callbacks below.
 */
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
/* ethtool get_ts_info: report timestamping capabilities.  Software
 * timestamping is always available; hardware TX/RX/raw timestamping
 * and a PHC index are reported only on PTP_CAPABLE chips.
 */
6142 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6144 struct tg3 *tp = netdev_priv(dev);
6146 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6147 SOF_TIMESTAMPING_RX_SOFTWARE |
6148 SOF_TIMESTAMPING_SOFTWARE;
6150 if (tg3_flag(tp, PTP_CAPABLE)) {
6151 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152 SOF_TIMESTAMPING_RX_HARDWARE |
6153 SOF_TIMESTAMPING_RAW_HARDWARE;
/* -1 means "no PHC" per the timestamping ABI. */
6157 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 info->phc_index = -1;
6161 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6163 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6164 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6165 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6166 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* PTP adjfreq callback: program the hardware frequency correction.
 * @ppb: requested adjustment in parts per billion (sign handled via
 *       neg_adj; the sign-flip lines are elided in this view).
 */
6170 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6172 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6173 bool neg_adj = false;
6181 /* Frequency adjustment is performed using hardware with a 24 bit
6182 * accumulator and a programmable correction value. On each clk, the
6183 * correction value gets added to the accumulator and when it
6184 * overflows, the time counter is incremented/decremented.
6186 * So conversion from ppb to correction value is
6187 * ppb * (1 << 24) / 1000000000
6189 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6190 TG3_EAV_REF_CLK_CORRECT_MASK;
6192 tg3_full_lock(tp, 0);
/* Nonzero correction: enable, with direction bit; zero: disable. */
6195 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6196 TG3_EAV_REF_CLK_CORRECT_EN |
6197 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6199 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6201 tg3_full_unlock(tp);
/* PTP adjtime callback: phase adjustment is kept as a software offset
 * (tp->ptp_adjust) rather than rewriting the hardware counter.
 */
6206 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6208 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210 tg3_full_lock(tp, 0);
6211 tp->ptp_adjust += delta;
6212 tg3_full_unlock(tp);
/* PTP gettimex64 callback: hardware counter plus the software
 * ptp_adjust offset, converted to a timespec64.  @sts receives the
 * bracketing system timestamps via tg3_refclk_read().
 */
6217 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6218 struct ptp_system_timestamp *sts)
6221 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6223 tg3_full_lock(tp, 0);
6224 ns = tg3_refclk_read(tp, sts);
6225 ns += tp->ptp_adjust;
6226 tg3_full_unlock(tp);
6228 *ts = ns_to_timespec64(ns);
/* PTP settime64 callback: write the hardware counter directly.
 * NOTE(review): the original presumably also resets tp->ptp_adjust
 * here - that line is elided in this view.
 */
6233 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6234 const struct timespec64 *ts)
6237 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6239 ns = timespec64_to_ns(ts);
6241 tg3_full_lock(tp, 0);
6242 tg3_refclk_write(tp, ns);
6244 tg3_full_unlock(tp);
/* PTP enable callback: only PTP_CLK_REQ_PEROUT index 0 is handled,
 * and only as a one-shot pulse (period must be 0) programmed into
 * the EAV watchdog 0 registers.  Requests with flags, other indices,
 * or a start value above 63 bits are rejected.
 */
6249 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6250 struct ptp_clock_request *rq, int on)
6252 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257 case PTP_CLK_REQ_PEROUT:
6258 /* Reject requests with unsupported flags */
6259 if (rq->perout.flags)
6262 if (rq->perout.index != 0)
6265 tg3_full_lock(tp, 0);
6266 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6267 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6272 nsec = rq->perout.start.sec * 1000000000ULL +
6273 rq->perout.start.nsec;
6275 if (rq->perout.period.sec || rq->perout.period.nsec) {
6276 netdev_warn(tp->dev,
6277 "Device supports only a one-shot timesync output, period must be 0\n");
/* Hardware start register is 63 bits wide. */
6282 if (nsec & (1ULL << 63)) {
6283 netdev_warn(tp->dev,
6284 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
/* Arm watchdog 0 with the start time and route it to the GPIO. */
6289 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6290 tw32(TG3_EAV_WATCHDOG0_MSB,
6291 TG3_EAV_WATCHDOG0_EN |
6292 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6294 tw32(TG3_EAV_REF_CLCK_CTL,
6295 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
/* "off" path: disarm the watchdog and restore clock control. */
6297 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6298 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6302 tg3_full_unlock(tp);
/* PTP clock capabilities/operations template; copied into tp->ptp_info
 * by tg3_ptp_init().  max_adj is the largest frequency adjustment (in
 * ppb) the adjfreq callback accepts.
 */
6312 static const struct ptp_clock_info tg3_ptp_caps = {
6313 .owner = THIS_MODULE,
6314 .name = "tg3 clock",
6315 .max_adj = 250000000,
6321 .adjfreq = tg3_ptp_adjfreq,
6322 .adjtime = tg3_ptp_adjtime,
6323 .gettimex64 = tg3_ptp_gettimex,
6324 .settime64 = tg3_ptp_settime,
6325 .enable = tg3_ptp_enable,
/* Convert a raw hardware clock value into skb hardware timestamps.
 * Masks the counter with TG3_TSTAMP_MASK and converts to ktime (the
 * elided continuation adds tp->ptp_adjust).
 */
6328 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6329 struct skb_shared_hwtstamps *timestamp)
6331 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6332 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6336 /* tp->lock must be held */
/* Initialize PTP support: seed the hardware clock from system real
 * time and install the capabilities template.  No-op on chips
 * without the PTP_CAPABLE flag.
 */
6337 static void tg3_ptp_init(struct tg3 *tp)
6339 if (!tg3_flag(tp, PTP_CAPABLE))
6342 /* Initialize the hardware clock to the system time. */
6343 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6345 tp->ptp_info = tg3_ptp_caps;
6348 /* tp->lock must be held */
/* Re-seed the hardware clock after resume, folding in the accumulated
 * software phase offset (ptp_adjust).
 */
6349 static void tg3_ptp_resume(struct tg3 *tp)
6351 if (!tg3_flag(tp, PTP_CAPABLE))
6354 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
/* Tear down the registered PTP clock, if any.  NULLing ptp_clock
 * makes the teardown idempotent.
 */
6358 static void tg3_ptp_fini(struct tg3 *tp)
6360 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6363 ptp_clock_unregister(tp->ptp_clock);
6364 tp->ptp_clock = NULL;
/* Nonzero while interrupts are being synchronized/disabled; ISR paths
 * check this to back off.
 */
6368 static inline int tg3_irq_sync(struct tg3 *tp)
6370 return tp->irq_sync;
/* Bulk register dump helper: read @len bytes of registers starting at
 * @off into the dump buffer.  Note @dst is offset by @off so each
 * register lands at its own offset within the buffer.
 */
6373 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6377 dst = (u32 *)((u8 *)dst + off);
6378 for (i = 0; i < len; i += sizeof(u32))
6379 *dst++ = tr32(off + i);
/* Dump all architecturally interesting register blocks of a non-PCIe
 * (legacy) chip into @regs for the debug dump.  Each tg3_rd32_loop
 * call covers one hardware block (base offset, byte length); blocks
 * that only exist with certain features (MSI-X vectors, TX CPU on
 * pre-5705, NVRAM) are guarded by the matching flags.
 */
6382 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6384 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6385 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6386 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6387 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6388 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6389 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6390 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6391 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6392 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6393 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6394 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6395 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6396 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6397 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6398 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6399 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6400 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6401 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6402 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
/* Per-vector coalescing registers exist only with MSI-X support. */
6404 if (tg3_flag(tp, SUPPORT_MSIX))
6405 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6407 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6408 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6409 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6410 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6411 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6413 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6414 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* Pre-5705 chips have a separate TX on-chip CPU. */
6416 if (!tg3_flag(tp, 5705_PLUS)) {
6417 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6418 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6419 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6422 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6423 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6424 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6425 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6426 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6428 if (tg3_flag(tp, NVRAM))
6429 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Emergency diagnostic dump: snapshot registers (PCIe chips read the
 * public register window directly; legacy chips use the per-block
 * dumper), print non-zero register quads, then print the hardware
 * status block and NAPI ring indices for every interrupt vector.
 * GFP_ATOMIC because this may run from a timer/error context.
 */
6432 static void tg3_dump_state(struct tg3 *tp)
6437 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6441 if (tg3_flag(tp, PCI_EXPRESS)) {
6442 /* Read up to but not including private PCI registers */
6443 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6444 regs[i / sizeof(u32)] = tr32(i);
6446 tg3_dump_legacy_regs(tp, regs);
/* Skip all-zero quads to keep the log readable. */
6448 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6449 if (!regs[i + 0] && !regs[i + 1] &&
6450 !regs[i + 2] && !regs[i + 3])
6453 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6455 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6460 for (i = 0; i < tp->irq_cnt; i++) {
6461 struct tg3_napi *tnapi = &tp->napi[i];
6463 /* SW status block */
6465 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6467 tnapi->hw_status->status,
6468 tnapi->hw_status->status_tag,
6469 tnapi->hw_status->rx_jumbo_consumer,
6470 tnapi->hw_status->rx_consumer,
6471 tnapi->hw_status->rx_mini_consumer,
6472 tnapi->hw_status->idx[0].rx_producer,
6473 tnapi->hw_status->idx[0].tx_consumer);
6476 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6478 tnapi->last_tag, tnapi->last_irq_tag,
6479 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6481 tnapi->prodring.rx_std_prod_idx,
6482 tnapi->prodring.rx_std_cons_idx,
6483 tnapi->prodring.rx_jmb_prod_idx,
6484 tnapi->prodring.rx_jmb_cons_idx);
6488 /* This is called whenever we suspect that the system chipset is re-
6489 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6490 * is bogus tx completions. We try to recover by setting the
6491 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6494 static void tg3_tx_recover(struct tg3 *tp)
/* Sanity check: recovery makes no sense if the reorder workaround is
 * already active or mailbox writes already go through the indirect path.
 */
6496 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6497 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6499 netdev_warn(tp->dev,
6500 "The system may be re-ordering memory-mapped I/O "
6501 "cycles to the network device, attempting to recover. "
6502 "Please report the problem to the driver maintainer "
6503 "and include system chipset information.\n");
/* The actual reset happens later, driven by this flag. */
6505 tg3_flag_set(tp, TX_RECOVERY_PENDING);
/* Number of free TX descriptors on this ring.  The prod/cons
 * difference is masked to the ring size to handle wraparound.
 */
6508 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6510 /* Tell compiler to fetch tx indices from memory. */
6512 return tnapi->tx_pending -
6513 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6516 /* Tigon3 never reports partial packet sends. So we do not
6517 * need special logic to handle SKBs that have not had all
6518 * of their frags sent yet, like SunGEM does.
/* tg3_tx - reclaim completed TX descriptors for one NAPI vector.
 *
 * Walks the ring from the software consumer index up to the hardware
 * consumer index, unmapping the head and all fragments of each sent
 * skb, retrieving a hardware TX timestamp when the descriptor asked
 * for one, and finally waking the TX queue if enough space freed up.
 * NOTE(review): a few lines (tx_bug set, recover call, smp barrier)
 * are elided in this view.
 */
6520 static void tg3_tx(struct tg3_napi *tnapi)
6522 struct tg3 *tp = tnapi->tp;
6523 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6524 u32 sw_idx = tnapi->tx_cons;
6525 struct netdev_queue *txq;
6526 int index = tnapi - tp->napi;
6527 unsigned int pkts_compl = 0, bytes_compl = 0;
6529 if (tg3_flag(tp, ENABLE_TSS))
6532 txq = netdev_get_tx_queue(tp->dev, index);
6534 while (sw_idx != hw_idx) {
6535 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6536 struct sk_buff *skb = ri->skb;
/* A NULL skb at a completed slot indicates ring corruption. */
6539 if (unlikely(skb == NULL)) {
/* Descriptor requested a HW timestamp: read it and deliver. */
6544 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6545 struct skb_shared_hwtstamps timestamp;
6546 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6547 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6549 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6551 skb_tstamp_tx(skb, &timestamp);
6554 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6555 skb_headlen(skb), DMA_TO_DEVICE);
/* Skip the extra descriptors used by a fragmented mapping. */
6559 while (ri->fragmented) {
6560 ri->fragmented = false;
6561 sw_idx = NEXT_TX(sw_idx);
6562 ri = &tnapi->tx_buffers[sw_idx];
6565 sw_idx = NEXT_TX(sw_idx);
/* Unmap each page fragment of the skb. */
6567 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6568 ri = &tnapi->tx_buffers[sw_idx];
6569 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6572 dma_unmap_page(&tp->pdev->dev,
6573 dma_unmap_addr(ri, mapping),
6574 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6577 while (ri->fragmented) {
6578 ri->fragmented = false;
6579 sw_idx = NEXT_TX(sw_idx);
6580 ri = &tnapi->tx_buffers[sw_idx];
6583 sw_idx = NEXT_TX(sw_idx);
6587 bytes_compl += skb->len;
6589 dev_consume_skb_any(skb);
6591 if (unlikely(tx_bug)) {
6597 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6599 tnapi->tx_cons = sw_idx;
6601 /* Need to make the tx_cons update visible to tg3_start_xmit()
6602 * before checking for netif_queue_stopped(). Without the
6603 * memory barrier, there is a small possibility that tg3_start_xmit()
6604 * will miss it and cause the queue to be stopped forever.
/* Re-check under the tx lock to close the race with the xmit path. */
6608 if (unlikely(netif_tx_queue_stopped(txq) &&
6609 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6610 __netif_tx_lock(txq, smp_processor_id());
6611 if (netif_tx_queue_stopped(txq) &&
6612 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6613 netif_tx_wake_queue(txq);
6614 __netif_tx_unlock(txq);
/* Free RX buffer memory by the method it was allocated with: page
 * fragment vs kmalloc (the kfree branch is elided in this view).
 */
6618 static void tg3_frag_free(bool is_frag, void *data)
6621 skb_free_frag(data);
/* Unmap and free one RX ring buffer.  skb_size reconstructs the
 * original allocation size (data + offset + shared_info) so the
 * frag-vs-kmalloc decision matches tg3_alloc_rx_data().
 */
6626 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6628 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6629 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6634 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6636 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6641 /* Returns size of skb allocated or < 0 on error.
6643 * We only need to fill in the address because the other members
6644 * of the RX descriptor are invariant, see tg3_init_rings.
6646 * Note the purposeful assymetry of cpu vs. chip accesses. For
6647 * posting buffers we only dirty the first cache line of the RX
6648 * descriptor (containing the address). Whereas for the RX status
6649 * buffers the cpu only reads the last cacheline of the RX descriptor
6650 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6652 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6653 u32 opaque_key, u32 dest_idx_unmasked,
6654 unsigned int *frag_size)
6656 struct tg3_rx_buffer_desc *desc;
6657 struct ring_info *map;
6660 int skb_size, data_size, dest_idx;
/* Resolve the target ring (standard vs jumbo) from the opaque key. */
6662 switch (opaque_key) {
6663 case RXD_OPAQUE_RING_STD:
6664 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6665 desc = &tpr->rx_std[dest_idx];
6666 map = &tpr->rx_std_buffers[dest_idx];
6667 data_size = tp->rx_pkt_map_sz;
6670 case RXD_OPAQUE_RING_JUMBO:
6671 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6672 desc = &tpr->rx_jmb[dest_idx].std;
6673 map = &tpr->rx_jmb_buffers[dest_idx];
6674 data_size = TG3_RX_JMB_MAP_SZ;
6681 /* Do not overwrite any of the map or rp information
6682 * until we are sure we can commit to a new buffer.
6684 * Callers depend upon this behavior and assume that
6685 * we leave everything unchanged if we fail.
/* Allocation strategy mirrors tg3_rx_data_free(): page fragment when
 * it fits in a page, kmalloc otherwise.
 */
6687 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6688 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6689 if (skb_size <= PAGE_SIZE) {
6690 data = napi_alloc_frag(skb_size);
6691 *frag_size = skb_size;
6693 data = kmalloc(skb_size, GFP_ATOMIC);
6699 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6700 data_size, DMA_FROM_DEVICE);
/* Mapping failed: release the buffer; caller sees an error return. */
6701 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6702 tg3_frag_free(skb_size <= PAGE_SIZE, data);
/* Commit: record the mapping and publish the DMA address to the chip. */
6707 dma_unmap_addr_set(map, mapping, mapping);
6709 desc->addr_hi = ((u64)mapping >> 32);
6710 desc->addr_lo = ((u64)mapping & 0xffffffff);
6715 /* We only need to move over in the address because the other
6716 * members of the RX descriptor are invariant. See notes above
6717 * tg3_alloc_rx_data for full details.
/* tg3_recycle_rx - give an unconsumed RX buffer back to a producer ring.
 *
 * Moves the buffer's data pointer and DMA mapping from the source
 * (vector-0 producer set) slot to the destination ring slot without
 * reallocating, then clears the source slot.  Used when a received
 * packet is dropped/copied and its buffer can be reposted as-is.
 */
6719 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6720 struct tg3_rx_prodring_set *dpr,
6721 u32 opaque_key, int src_idx,
6722 u32 dest_idx_unmasked)
6724 struct tg3 *tp = tnapi->tp;
6725 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6726 struct ring_info *src_map, *dest_map;
6727 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
/* Pick the ring pair (standard vs jumbo) by the opaque key. */
6730 switch (opaque_key) {
6731 case RXD_OPAQUE_RING_STD:
6732 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6733 dest_desc = &dpr->rx_std[dest_idx];
6734 dest_map = &dpr->rx_std_buffers[dest_idx];
6735 src_desc = &spr->rx_std[src_idx];
6736 src_map = &spr->rx_std_buffers[src_idx];
6739 case RXD_OPAQUE_RING_JUMBO:
6740 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6741 dest_desc = &dpr->rx_jmb[dest_idx].std;
6742 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6743 src_desc = &spr->rx_jmb[src_idx].std;
6744 src_map = &spr->rx_jmb_buffers[src_idx];
/* Transfer ownership: data pointer, unmap cookie, DMA address. */
6751 dest_map->data = src_map->data;
6752 dma_unmap_addr_set(dest_map, mapping,
6753 dma_unmap_addr(src_map, mapping));
6754 dest_desc->addr_hi = src_desc->addr_hi;
6755 dest_desc->addr_lo = src_desc->addr_lo;
6757 /* Ensure that the update to the skb happens after the physical
6758 * addresses have been transferred to the new BD location.
6762 src_map->data = NULL;
6765 /* The RX ring scheme is composed of multiple rings which post fresh
6766 * buffers to the chip, and one special ring the chip uses to report
6767 * status back to the host.
6769 * The special ring reports the status of received packets to the
6770 * host. The chip does not write into the original descriptor the
6771 * RX buffer was obtained from. The chip simply takes the original
6772 * descriptor as provided by the host, updates the status and length
6773 * field, then writes this into the next status ring entry.
6775 * Each ring the host uses to post buffers to the chip is described
6776 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6777 * it is first placed into the on-chip ram. When the packet's length
6778 * is known, it walks down the TG3_BDINFO entries to select the ring.
6779 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6780 * which is within the range of the new packet's length is chosen.
6782 * The "separate ring for rx status" scheme may sound queer, but it makes
6783 * sense from a cache coherency perspective. If only the host writes
6784 * to the buffer post rings, and only the chip writes to the rx status
6785 * rings, then cache lines never move beyond shared-modified state.
6786 * If both the host and chip were to write into the same ring, cache line
6787 * eviction could occur since both entities want it in an exclusive state.
/* Service the RX return (status) ring for this NAPI context, bounded
 * by @budget.  Walks from the software consumer index to the hardware
 * producer index, builds an skb for each completed packet (zero-copy
 * build_skb() for large frames, copy into a fresh skb for small ones),
 * hands it to napi_gro_receive(), then ACKs the return ring and
 * reposts standard/jumbo producer indices to the chip.
 *
 * NOTE(review): gaps in the embedded numbering are elided lines
 * (labels such as next_pkt/next_pkt_nopost, counters, braces, memory
 * barriers, and the final return); do not assume visible lines are
 * contiguous.
 */
6789 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6791 struct tg3 *tp = tnapi->tp;
6792 u32 work_mask, rx_std_posted = 0;
6793 u32 std_prod_idx, jmb_prod_idx;
6794 u32 sw_idx = tnapi->rx_rcb_ptr;
6797 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
/* Snapshot the hardware producer index of the return ring. */
6799 hw_idx = *(tnapi->rx_rcb_prod_idx);
6801 * We need to order the read of hw_idx and the read of
6802 * the opaque cookie.
/* Work on local copies of the producer indices; written back below. */
6807 std_prod_idx = tpr->rx_std_prod_idx;
6808 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6809 while (sw_idx != hw_idx && budget > 0) {
6810 struct ring_info *ri;
6811 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6813 struct sk_buff *skb;
6814 dma_addr_t dma_addr;
6815 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie tells us which producer ring (and slot) the
 * buffer came from; see the block comment above this function.
 */
6819 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6820 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6821 if (opaque_key == RXD_OPAQUE_RING_STD) {
6822 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6823 dma_addr = dma_unmap_addr(ri, mapping);
6825 post_ptr = &std_prod_idx;
6827 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6828 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6829 dma_addr = dma_unmap_addr(ri, mapping);
6831 post_ptr = &jmb_prod_idx;
/* Unknown cookie: skip without posting a replacement buffer. */
6833 goto next_pkt_nopost;
6835 work_mask |= opaque_key;
/* Error frame: give the buffer straight back to the producer ring. */
6837 if (desc->err_vlan & RXD_ERR_MASK) {
6839 tg3_recycle_rx(tnapi, tpr, opaque_key,
6840 desc_idx, *post_ptr);
6842 /* Other statistics kept track of by card. */
6847 prefetch(data + TG3_RX_OFFSET(tp));
6848 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* Latch the hardware RX timestamp for PTP v1/v2 frames. */
6851 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6852 RXD_FLAG_PTPSTAT_PTPV1 ||
6853 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854 RXD_FLAG_PTPSTAT_PTPV2) {
6855 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6856 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
/* Large frame: hand the existing buffer to the stack zero-copy and
 * post a freshly allocated replacement into the producer ring.
 */
6859 if (len > TG3_RX_COPY_THRESH(tp)) {
6861 unsigned int frag_size;
6863 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6864 *post_ptr, &frag_size);
6868 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6871 /* Ensure that the update to the data happens
6872 * after the usage of the old DMA mapping.
6878 skb = build_skb(data, frag_size);
6880 tg3_frag_free(frag_size != 0, data);
6881 goto drop_it_no_recycle;
6883 skb_reserve(skb, TG3_RX_OFFSET(tp));
/* Small frame: recycle the DMA buffer and copy the payload into a
 * newly allocated skb instead.
 */
6885 tg3_recycle_rx(tnapi, tpr, opaque_key,
6886 desc_idx, *post_ptr);
6888 skb = netdev_alloc_skb(tp->dev,
6889 len + TG3_RAW_IP_ALIGN);
6891 goto drop_it_no_recycle;
6893 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6894 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6897 data + TG3_RX_OFFSET(tp),
6899 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6900 len, DMA_FROM_DEVICE);
6905 tg3_hwclock_to_timestamp(tp, tstamp,
6906 skb_hwtstamps(skb));
/* Trust the hardware TCP/UDP checksum only when RXCSUM is enabled
 * and the chip reports a fully validated (0xffff) checksum.
 */
6908 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6909 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6910 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6911 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6912 skb->ip_summed = CHECKSUM_UNNECESSARY;
6914 skb_checksum_none_assert(skb);
6916 skb->protocol = eth_type_trans(skb, tp->dev);
/* Drop oversized frames, but let VLAN-tagged ones through since the
 * tag accounts for the extra length.
 */
6918 if (len > (tp->dev->mtu + ETH_HLEN) &&
6919 skb->protocol != htons(ETH_P_8021Q) &&
6920 skb->protocol != htons(ETH_P_8021AD)) {
6921 dev_kfree_skb_any(skb);
6922 goto drop_it_no_recycle;
6925 if (desc->type_flags & RXD_FLAG_VLAN &&
6926 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6927 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6928 desc->err_vlan & RXD_VLAN_MASK)
6930 napi_gro_receive(&tnapi->napi, skb);
/* Periodically repost the standard producer index so the chip never
 * starves for buffers mid-loop.
 */
6938 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6939 tpr->rx_std_prod_idx = std_prod_idx &
6940 tp->rx_std_ring_mask;
6941 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6942 tpr->rx_std_prod_idx);
6943 work_mask &= ~RXD_OPAQUE_RING_STD;
6948 sw_idx &= tp->rx_ret_ring_mask;
6950 /* Refresh hw_idx to see if there is new work */
6951 if (sw_idx == hw_idx) {
6952 hw_idx = *(tnapi->rx_rcb_prod_idx);
6957 /* ACK the status ring. */
6958 tnapi->rx_rcb_ptr = sw_idx;
6959 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6961 /* Refill RX ring(s). */
6962 if (!tg3_flag(tp, ENABLE_RSS)) {
6963 /* Sync BD data before updating mailbox */
6966 if (work_mask & RXD_OPAQUE_RING_STD) {
6967 tpr->rx_std_prod_idx = std_prod_idx &
6968 tp->rx_std_ring_mask;
6969 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6970 tpr->rx_std_prod_idx);
6972 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6973 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6974 tp->rx_jmb_ring_mask;
6975 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6976 tpr->rx_jmb_prod_idx);
6978 } else if (work_mask) {
6979 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6980 * updated before the producer indices can be updated.
6984 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6985 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
/* Under RSS, napi[1] owns the hardware producer mailboxes; kick it
 * to transfer our refilled buffers (see tg3_rx_prodring_xfer()).
 */
6987 if (tnapi != &tp->napi[1]) {
6988 tp->rx_refill = true;
6989 napi_schedule(&tp->napi[1].napi);
/* Check the status block for a link-change event and, if one is
 * pending, clear the flag and re-run PHY setup under tp->lock.
 * Devices that poll the link via a register or SERDES state do not
 * use the status-block flag and are skipped here.
 *
 * NOTE(review): numbering gaps are elided lines (braces and, around
 * 7007, a MAC status write for the phylib path).
 */
6996 static void tg3_poll_link(struct tg3 *tp)
6998 /* handle link change and other phy events */
6999 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7000 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7002 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear LINK_CHG while keeping UPDATED set so the block stays valid. */
7003 sblk->status = SD_STATUS_UPDATED |
7004 (sblk->status & ~SD_STATUS_LINK_CHG);
7005 spin_lock(&tp->lock);
7006 if (tg3_flag(tp, USE_PHYLIB)) {
7008 (MAC_STATUS_SYNC_CHANGED |
7009 MAC_STATUS_CFG_CHANGED |
7010 MAC_STATUS_MI_COMPLETION |
7011 MAC_STATUS_LNKSTATE_CHANGED));
7014 tg3_setup_phy(tp, false);
7015 spin_unlock(&tp->lock);
/* Transfer recycled RX buffers from a per-vector source producer ring
 * set @spr into the destination set @dpr (the hardware-visible set
 * owned by napi[0]/napi[1]).  Copies ring_info entries and descriptor
 * addresses in bulk (up to the contiguous run before the ring wraps),
 * then advances the source consumer and destination producer indices.
 * The standard ring is handled first, then the jumbo ring.
 *
 * NOTE(review): numbering gaps are elided lines (the err/i locals,
 * while(1) loops, break statements, barriers, and the final return of
 * the error accumulator).
 */
7020 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7021 struct tg3_rx_prodring_set *dpr,
7022 struct tg3_rx_prodring_set *spr)
7024 u32 si, di, cpycnt, src_prod_idx;
/* ---- standard ring ---- */
7028 src_prod_idx = spr->rx_std_prod_idx;
7030 /* Make sure updates to the rx_std_buffers[] entries and the
7031 * standard producer index are seen in the correct order.
7035 if (spr->rx_std_cons_idx == src_prod_idx)
/* Contiguous run available before the source ring wraps. */
7038 if (spr->rx_std_cons_idx < src_prod_idx)
7039 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7041 cpycnt = tp->rx_std_ring_mask + 1 -
7042 spr->rx_std_cons_idx;
/* Also clamp to the contiguous space left in the destination. */
7044 cpycnt = min(cpycnt,
7045 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7047 si = spr->rx_std_cons_idx;
7048 di = dpr->rx_std_prod_idx;
/* Shrink the copy if a destination slot is still occupied. */
7050 for (i = di; i < di + cpycnt; i++) {
7051 if (dpr->rx_std_buffers[i].data) {
7061 /* Ensure that updates to the rx_std_buffers ring and the
7062 * shadowed hardware producer ring from tg3_recycle_skb() are
7063 * ordered correctly WRT the skb check above.
7067 memcpy(&dpr->rx_std_buffers[di],
7068 &spr->rx_std_buffers[si],
7069 cpycnt * sizeof(struct ring_info));
7071 for (i = 0; i < cpycnt; i++, di++, si++) {
7072 struct tg3_rx_buffer_desc *sbd, *dbd;
7073 sbd = &spr->rx_std[si];
7074 dbd = &dpr->rx_std[di];
7075 dbd->addr_hi = sbd->addr_hi;
7076 dbd->addr_lo = sbd->addr_lo;
7079 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7080 tp->rx_std_ring_mask;
7081 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7082 tp->rx_std_ring_mask;
/* ---- jumbo ring: same algorithm as the standard ring above ---- */
7086 src_prod_idx = spr->rx_jmb_prod_idx;
7088 /* Make sure updates to the rx_jmb_buffers[] entries and
7089 * the jumbo producer index are seen in the correct order.
7093 if (spr->rx_jmb_cons_idx == src_prod_idx)
7096 if (spr->rx_jmb_cons_idx < src_prod_idx)
7097 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7099 cpycnt = tp->rx_jmb_ring_mask + 1 -
7100 spr->rx_jmb_cons_idx;
7102 cpycnt = min(cpycnt,
7103 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7105 si = spr->rx_jmb_cons_idx;
7106 di = dpr->rx_jmb_prod_idx;
7108 for (i = di; i < di + cpycnt; i++) {
7109 if (dpr->rx_jmb_buffers[i].data) {
7119 /* Ensure that updates to the rx_jmb_buffers ring and the
7120 * shadowed hardware producer ring from tg3_recycle_skb() are
7121 * ordered correctly WRT the skb check above.
7125 memcpy(&dpr->rx_jmb_buffers[di],
7126 &spr->rx_jmb_buffers[si],
7127 cpycnt * sizeof(struct ring_info));
7129 for (i = 0; i < cpycnt; i++, di++, si++) {
7130 struct tg3_rx_buffer_desc *sbd, *dbd;
7131 sbd = &spr->rx_jmb[si].std;
7132 dbd = &dpr->rx_jmb[di].std;
7133 dbd->addr_hi = sbd->addr_hi;
7134 dbd->addr_lo = sbd->addr_lo;
7137 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7138 tp->rx_jmb_ring_mask;
7139 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7140 tp->rx_jmb_ring_mask;
/* Common NAPI work body shared by tg3_poll() and tg3_poll_msix():
 * run TX completion, then RX processing within the remaining budget.
 * Under RSS, napi[1] additionally gathers recycled buffers from all
 * RX queues into the hardware-visible producer ring and reposts the
 * mailboxes.  Returns the updated work_done count (return elided in
 * this listing).
 *
 * NOTE(review): numbering gaps are elided lines (tg3_tx() call, error
 * goto targets, barriers, and the final return).
 */
7146 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7148 struct tg3 *tp = tnapi->tp;
7150 /* run TX completion thread */
7151 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7153 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vectors without an RX return ring have nothing more to do. */
7157 if (!tnapi->rx_rcb_prod_idx)
7160 /* run RX thread, within the bounds set by NAPI.
7161 * All RX "locking" is done by ensuring outside
7162 * code synchronizes with tg3->napi.poll()
7164 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7165 work_done += tg3_rx(tnapi, budget - work_done);
7167 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7168 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
/* Remember the old producer indices so we only touch the mailboxes
 * when the transfer below actually moved buffers.
 */
7170 u32 std_prod_idx = dpr->rx_std_prod_idx;
7171 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7173 tp->rx_refill = false;
7174 for (i = 1; i <= tp->rxq_cnt; i++)
7175 err |= tg3_rx_prodring_xfer(tp, dpr,
7176 &tp->napi[i].prodring);
7180 if (std_prod_idx != dpr->rx_std_prod_idx)
7181 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7182 dpr->rx_std_prod_idx);
7184 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7185 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7186 dpr->rx_jmb_prod_idx);
/* On transfer error, force a coalescence-now to retrigger work. */
7189 tw32_f(HOSTCC_MODE, tp->coal_now);
7195 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7197 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7198 schedule_work(&tp->reset_task);
7201 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7203 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204 cancel_work_sync(&tp->reset_task);
7205 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors 1..N (tagged-status devices).
 * Loops tg3_poll_work() until the budget is exhausted or no work
 * remains, then latches the status tag and re-enables the vector's
 * interrupt.  napi[1] re-polls while tp->rx_refill is set so buffer
 * transfers from the other RX queues are not lost.
 *
 * NOTE(review): numbering gaps are elided lines (the while(1) loop,
 * tx_recovery path, and the final returns of work_done/budget).
 */
7208 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7210 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7211 struct tg3 *tp = tnapi->tp;
7213 struct tg3_hw_status *sblk = tnapi->hw_status;
7216 work_done = tg3_poll_work(tnapi, work_done, budget);
7218 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7221 if (unlikely(work_done >= budget))
7224 /* tp->last_tag is used in tg3_int_reenable() below
7225 * to tell the hw how much work has been processed,
7226 * so we must read it before checking for more work.
7228 tnapi->last_tag = sblk->status_tag;
7229 tnapi->last_irq_tag = tnapi->last_tag;
7232 /* check for RX/TX work to do */
7233 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7234 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7236 /* This test here is not race free, but will reduce
7237 * the number of interrupts by looping again.
7239 if (tnapi == &tp->napi[1] && tp->rx_refill)
7242 napi_complete_done(napi, work_done);
7243 /* Reenable interrupts. */
7244 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7246 /* This test here is synchronized by napi_schedule()
7247 * and napi_complete() to close the race condition.
7249 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
/* Force an immediate coalescence event to re-arm this vector. */
7250 tw32(HOSTCC_MODE, tp->coalesce_mode |
7251 HOSTCC_MODE_ENABLE |
7258 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
/* tx_recovery path: complete NAPI and hand off to the reset task. */
7262 /* work_done is guaranteed to be less than budget. */
7263 napi_complete(napi);
7264 tg3_reset_task_schedule(tp);
/* Inspect the chip's error status registers after the status block
 * signals SD_STATUS_ERROR.  Any real error (flow attention, MSI
 * status, DMA status) is logged once and answered by scheduling the
 * reset task; ERROR_PROCESSED prevents repeated handling.
 *
 * NOTE(review): numbering gaps are elided lines (braces,
 * real_error = true assignments, the !real_error early return, and a
 * tg3_dump_state() call around 7295).
 */
7268 static void tg3_process_error(struct tg3 *tp)
7271 bool real_error = false;
/* Already handled since the last reset: nothing to do. */
7273 if (tg3_flag(tp, ERROR_PROCESSED))
7276 /* Check Flow Attention register */
7277 val = tr32(HOSTCC_FLOW_ATTN);
7278 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7279 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7283 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7284 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7288 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7289 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
/* Latch and escalate: the reset task performs the actual recovery. */
7298 tg3_flag_set(tp, ERROR_PROCESSED);
7299 tg3_reset_task_schedule(tp);
/* NAPI poll handler for vector 0 (and non-MSI-X devices).  Handles
 * error status and link changes in addition to TX/RX work, then either
 * re-enables interrupts when the status block shows no pending work or
 * hands off to the reset task on TX recovery.
 *
 * NOTE(review): numbering gaps are elided lines (the while(1) loop,
 * tg3_poll_link() call around 7313, tx_recovery path, and the final
 * returns of work_done/budget).
 */
7302 static int tg3_poll(struct napi_struct *napi, int budget)
7304 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7305 struct tg3 *tp = tnapi->tp;
7307 struct tg3_hw_status *sblk = tnapi->hw_status;
7310 if (sblk->status & SD_STATUS_ERROR)
7311 tg3_process_error(tp);
7315 work_done = tg3_poll_work(tnapi, work_done, budget);
7317 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7320 if (unlikely(work_done >= budget))
7323 if (tg3_flag(tp, TAGGED_STATUS)) {
7324 /* tp->last_tag is used in tg3_int_reenable() below
7325 * to tell the hw how much work has been processed,
7326 * so we must read it before checking for more work.
7328 tnapi->last_tag = sblk->status_tag;
7329 tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged devices just clear the UPDATED flag instead. */
7332 sblk->status &= ~SD_STATUS_UPDATED;
7334 if (likely(!tg3_has_work(tnapi))) {
7335 napi_complete_done(napi, work_done);
7336 tg3_int_reenable(tnapi);
7341 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
/* tx_recovery path: complete NAPI and hand off to the reset task. */
7345 /* work_done is guaranteed to be less than budget. */
7346 napi_complete(napi);
7347 tg3_reset_task_schedule(tp);
7351 static void tg3_napi_disable(struct tg3 *tp)
7355 for (i = tp->irq_cnt - 1; i >= 0; i--)
7356 napi_disable(&tp->napi[i].napi);
7359 static void tg3_napi_enable(struct tg3 *tp)
7363 for (i = 0; i < tp->irq_cnt; i++)
7364 napi_enable(&tp->napi[i].napi);
7367 static void tg3_napi_init(struct tg3 *tp)
7371 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7372 for (i = 1; i < tp->irq_cnt; i++)
7373 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7376 static void tg3_napi_fini(struct tg3 *tp)
7380 for (i = 0; i < tp->irq_cnt; i++)
7381 netif_napi_del(&tp->napi[i].napi);
7384 static inline void tg3_netif_stop(struct tg3 *tp)
7386 netif_trans_update(tp->dev); /* prevent tx timeout */
7387 tg3_napi_disable(tp);
7388 netif_carrier_off(tp->dev);
7389 netif_tx_disable(tp->dev);
7392 /* tp->lock must be held */
/* Restart TX queues, NAPI and interrupts after a reset or reconfig.
 * Counterpart of tg3_netif_stop(); caller must hold tp->lock.
 *
 * NOTE(review): the numbering gap at 7402-7403 elides lines before
 * netif_carrier_on() — presumably a link-up condition; confirm
 * against the full source before assuming carrier is raised
 * unconditionally.
 */
7393 static inline void tg3_netif_start(struct tg3 *tp)
7397 /* NOTE: unconditional netif_tx_wake_all_queues is only
7398 * appropriate so long as all callers are assured to
7399 * have free tx slots (such as after tg3_init_hw)
7401 netif_tx_wake_all_queues(tp->dev);
7404 netif_carrier_on(tp->dev);
7406 tg3_napi_enable(tp);
/* Mark the status block updated so the first poll sees work. */
7407 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7408 tg3_enable_ints(tp);
/* Wait for all in-flight interrupt handlers to finish.  Temporarily
 * drops tp->lock (the sparse annotations below document this) so the
 * handlers can complete, then re-acquires it before returning.
 *
 * NOTE(review): the numbering gap at 7418-7421 elides lines between
 * the BUG_ON and the unlock — presumably setting the irq_sync latch
 * that handlers check; confirm against the full source.
 */
7411 static void tg3_irq_quiesce(struct tg3 *tp)
7412 __releases(tp->lock)
7413 __acquires(tp->lock)
7417 BUG_ON(tp->irq_sync);
7422 spin_unlock_bh(&tp->lock);
7424 for (i = 0; i < tp->irq_cnt; i++)
7425 synchronize_irq(tp->napi[i].irq_vec);
7427 spin_lock_bh(&tp->lock);
7430 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7431 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7432 * with as well. Most of the time, this is not necessary except when
7433 * shutting down the device.
/* Take the driver-wide lock; see the block comment above for the
 * @irq_sync semantics.
 *
 * NOTE(review): the numbering gap at 7438 elides a line before the
 * tg3_irq_quiesce() call — presumably `if (irq_sync)`, making the
 * quiesce conditional; confirm against the full source.
 */
7435 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7437 spin_lock_bh(&tp->lock);
7439 tg3_irq_quiesce(tp);
7442 static inline void tg3_full_unlock(struct tg3 *tp)
7444 spin_unlock_bh(&tp->lock);
7447 /* One-shot MSI handler - Chip automatically disables interrupt
7448 * after sending MSI so driver doesn't have to do it.
/* NOTE(review): numbering gaps elide lines here (a guard before the
 * rx_rcb prefetch and the IRQ_HANDLED return).
 */
7450 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7452 struct tg3_napi *tnapi = dev_id;
7453 struct tg3 *tp = tnapi->tp;
/* Warm the caches for the status block and the next RX return BD. */
7455 prefetch(tnapi->hw_status);
7457 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Do not schedule NAPI while tg3_irq_quiesce() is draining IRQs. */
7459 if (likely(!tg3_irq_sync(tp)))
7460 napi_schedule(&tnapi->napi);
7465 /* MSI ISR - No need to check for interrupt sharing and no need to
7466 * flush status block and interrupt mailbox. PCI ordering rules
7467 * guarantee that MSI will arrive after the status block.
/* NOTE(review): numbering gaps elide lines here (a guard before the
 * rx_rcb prefetch and the opening line of the comment at 7478).
 */
7469 static irqreturn_t tg3_msi(int irq, void *dev_id)
7471 struct tg3_napi *tnapi = dev_id;
7472 struct tg3 *tp = tnapi->tp;
/* Warm the caches for the status block and the next RX return BD. */
7474 prefetch(tnapi->hw_status);
7476 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7478 * Writing any value to intr-mbox-0 clears PCI INTA# and
7479 * chip-internal interrupt pending events.
7480 * Writing non-zero to intr-mbox-0 additional tells the
7481 * NIC to stop sending us irqs, engaging "in-intr-handler"
7484 tw32_mailbox(tnapi->int_mbox, 0x00000001);
/* Do not schedule NAPI while tg3_irq_quiesce() is draining IRQs. */
7485 if (likely(!tg3_irq_sync(tp)))
7486 napi_schedule(&tnapi->napi);
7488 return IRQ_RETVAL(1);
/* Legacy (INTx) interrupt handler for non-tagged-status devices.
 * Claims the interrupt only when the status block shows an update (or
 * the PCI state register confirms INTA# is ours), masks further
 * interrupts via the mailbox, and schedules NAPI if there is work.
 *
 * NOTE(review): numbering gaps elide lines (handled = 0 on the
 * not-ours path, the out: label, and an intermediate goto).
 */
7491 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7493 struct tg3_napi *tnapi = dev_id;
7494 struct tg3 *tp = tnapi->tp;
7495 struct tg3_hw_status *sblk = tnapi->hw_status;
7496 unsigned int handled = 1;
7498 /* In INTx mode, it is possible for the interrupt to arrive at
7499 * the CPU before the status block posted prior to the interrupt.
7500 * Reading the PCI State register will confirm whether the
7501 * interrupt is ours and will flush the status block.
7503 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7504 if (tg3_flag(tp, CHIP_RESETTING) ||
7505 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7512 * Writing any value to intr-mbox-0 clears PCI INTA# and
7513 * chip-internal interrupt pending events.
7514 * Writing non-zero to intr-mbox-0 additional tells the
7515 * NIC to stop sending us irqs, engaging "in-intr-handler"
7518 * Flush the mailbox to de-assert the IRQ immediately to prevent
7519 * spurious interrupts. The flush impacts performance but
7520 * excessive spurious interrupts can be worse in some cases.
7522 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7523 if (tg3_irq_sync(tp))
/* Consume the status-block update before deciding on work. */
7525 sblk->status &= ~SD_STATUS_UPDATED;
7526 if (likely(tg3_has_work(tnapi))) {
7527 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7528 napi_schedule(&tnapi->napi);
7530 /* No work, shared interrupt perhaps? re-enable
7531 * interrupts, and flush that PCI write
7533 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537 return IRQ_RETVAL(handled);
/* Legacy (INTx) interrupt handler for tagged-status devices.  Uses
 * the status tag rather than SD_STATUS_UPDATED to detect new work, so
 * a repeated tag means the interrupt is not ours (or is screaming).
 *
 * NOTE(review): numbering gaps elide lines (handled = 0 on the
 * not-ours path, the out: label, and an intermediate goto).
 */
7540 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7542 struct tg3_napi *tnapi = dev_id;
7543 struct tg3 *tp = tnapi->tp;
7544 struct tg3_hw_status *sblk = tnapi->hw_status;
7545 unsigned int handled = 1;
7547 /* In INTx mode, it is possible for the interrupt to arrive at
7548 * the CPU before the status block posted prior to the interrupt.
7549 * Reading the PCI State register will confirm whether the
7550 * interrupt is ours and will flush the status block.
7552 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7553 if (tg3_flag(tp, CHIP_RESETTING) ||
7554 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7561 * writing any value to intr-mbox-0 clears PCI INTA# and
7562 * chip-internal interrupt pending events.
7563 * writing non-zero to intr-mbox-0 additional tells the
7564 * NIC to stop sending us irqs, engaging "in-intr-handler"
7567 * Flush the mailbox to de-assert the IRQ immediately to prevent
7568 * spurious interrupts. The flush impacts performance but
7569 * excessive spurious interrupts can be worse in some cases.
7571 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574 * In a shared interrupt configuration, sometimes other devices'
7575 * interrupts will scream. We record the current status tag here
7576 * so that the above check can report that the screaming interrupts
7577 * are unhandled. Eventually they will be silenced.
7579 tnapi->last_irq_tag = sblk->status_tag;
7581 if (tg3_irq_sync(tp))
7584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7586 napi_schedule(&tnapi->napi);
7589 return IRQ_RETVAL(handled);
7592 /* ISR for interrupt test */
7593 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7595 struct tg3_napi *tnapi = dev_id;
7596 struct tg3 *tp = tnapi->tp;
7597 struct tg3_hw_status *sblk = tnapi->hw_status;
7599 if ((sblk->status & SD_STATUS_UPDATED) ||
7600 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7601 tg3_disable_ints(tp);
7602 return IRQ_RETVAL(1);
7604 return IRQ_RETVAL(0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler directly so the
 * stack can poll the device with interrupts disabled (netconsole).
 *
 * NOTE(review): numbering gaps elide lines (the `int i;` declaration,
 * a `return;` after the irq_sync check, closing brace and #endif).
 */
7608 static void tg3_poll_controller(struct net_device *dev)
7611 struct tg3 *tp = netdev_priv(dev);
/* Skip entirely while tg3_irq_quiesce() is draining interrupts. */
7613 if (tg3_irq_sync(tp))
7616 for (i = 0; i < tp->irq_cnt; i++)
7617 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* netdev watchdog callback: a TX queue stalled.  Log (when TX error
 * messages are enabled) and schedule the chip-reset worker to recover.
 *
 * NOTE(review): the numbering gap at 7627-7629 elides lines inside
 * the netif_msg_tx_err() block — likely additional state dumping;
 * confirm against the full source.
 */
7621 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7623 struct tg3 *tp = netdev_priv(dev);
7625 if (netif_msg_tx_err(tp)) {
7626 netdev_err(dev, "transmit timed out, resetting\n");
7630 tg3_reset_task_schedule(tp);
7633 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7634 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7636 u32 base = (u32) mapping & 0xffffffff;
7638 return base + len + 8 < base;
7641 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7642 * of any 4GB boundaries: 4G, 8G, etc
/* 5762-specific variant of the 4GB boundary test for TSO: the region
 * must also stay clear by up to MSS (14-bit field) extra bytes.
 *
 * NOTE(review): numbering gaps elide the signature continuation
 * (len/mss parameters at 7645) and the fallthrough `return 0;` for
 * non-5762 chips or mss == 0; confirm against the full source.
 */
7644 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7648 u32 base = (u32) mapping & 0xffffffff;
/* Wraparound of the low 32 bits signals a boundary crossing. */
7650 return ((base + len + (mss & 0x3fff)) < base);
7655 /* Test for DMA addresses > 40-bit */
/* Return nonzero when the end of the DMA region exceeds the 40-bit
 * address range, on chips with the 40BIT_DMA_BUG and kernels where
 * highmem on 64-bit makes such mappings possible.
 *
 * NOTE(review): numbering gaps elide the signature continuation and
 * the `return 0;` fallback outside the #if / for unaffected chips.
 */
7656 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7660 if (tg3_flag(tp, 40BIT_DMA_BUG))
7661 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill in one TX buffer descriptor: split the 64-bit DMA address into
 * hi/lo words, pack length with the low 16 flag bits, and pack MSS
 * and VLAN tag into the vlan_tag word.
 *
 * NOTE(review): the signature continuation (mss/vlan parameters at
 * 7670) is elided from this listing.
 */
7668 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7669 dma_addr_t mapping, u32 len, u32 flags,
7672 txbd->addr_hi = ((u64) mapping >> 32);
7673 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7674 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7675 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Place one TX fragment into the ring at *entry, splitting it into
 * multiple BDs when it exceeds tp->dma_limit and working around the
 * short-DMA hardware bug.  Advances *entry and decrements *budget per
 * BD consumed.  Returns the hwbug flag: true when the mapping hits a
 * hardware DMA bug (4GB/40-bit boundary, short DMA) or the budget ran
 * out, so the caller must fall back to the workaround path.
 *
 * NOTE(review): numbering gaps elide lines (the hwbug local and its
 * `hwbug = true` assignments after each test, budget decrements, and
 * the final return).
 */
7678 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7679 dma_addr_t map, u32 len, u32 flags,
7682 struct tg3 *tp = tnapi->tp;
/* Hardware DMA bug screens; each sets the hwbug result (elided). */
7685 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688 if (tg3_4g_overflow_test(map, len))
7691 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694 if (tg3_40bit_overflow_test(tp, map, len))
/* Split the fragment into dma_limit-sized BDs when required. */
7697 if (tp->dma_limit) {
7698 u32 prvidx = *entry;
7699 u32 tmp_flag = flags & ~TXD_FLAG_END;
7700 while (len > tp->dma_limit && *budget) {
7701 u32 frag_len = tp->dma_limit;
7702 len -= tp->dma_limit;
7704 /* Avoid the 8byte DMA problem */
7706 len += tp->dma_limit / 2;
7707 frag_len = tp->dma_limit / 2;
/* Mark intermediate BDs so tg3_tx_skb_unmap() can walk them. */
7710 tnapi->tx_buffers[*entry].fragmented = true;
7712 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7713 frag_len, tmp_flag, mss, vlan);
7716 *entry = NEXT_TX(*entry);
/* Emit the final (or only remaining) piece of the split. */
7723 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724 len, flags, mss, vlan);
7726 *entry = NEXT_TX(*entry);
/* Budget exhausted mid-split: unwind the fragmented marker. */
7729 tnapi->tx_buffers[prvidx].fragmented = false;
/* No dma_limit: the fragment fits in a single BD. */
7733 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734 len, flags, mss, vlan);
7735 *entry = NEXT_TX(*entry);
/* Undo the DMA mappings for one transmitted skb starting at ring slot
 * @entry: unmap the linear head, then frags 0..@last, skipping over
 * any extra BDs marked `fragmented` by tg3_tx_frag_set()'s splitting.
 *
 * NOTE(review): numbering gaps elide lines (the skb NULL check /
 * early return around 7746-7748 and the final txb->skb = NULL).
 */
7741 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744 struct sk_buff *skb;
7745 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
/* Unmap the linear portion of the skb first. */
7750 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7751 skb_headlen(skb), DMA_TO_DEVICE);
/* Skip BDs that were split off the head by the dma_limit logic. */
7753 while (txb->fragmented) {
7754 txb->fragmented = false;
7755 entry = NEXT_TX(entry);
7756 txb = &tnapi->tx_buffers[entry];
7759 for (i = 0; i <= last; i++) {
7760 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7762 entry = NEXT_TX(entry);
7763 txb = &tnapi->tx_buffers[entry];
7765 dma_unmap_page(&tnapi->tp->pdev->dev,
7766 dma_unmap_addr(txb, mapping),
7767 skb_frag_size(frag), DMA_TO_DEVICE);
/* Again skip any split BDs that follow this fragment. */
7769 while (txb->fragmented) {
7770 txb->fragmented = false;
7771 entry = NEXT_TX(entry);
7772 txb = &tnapi->tx_buffers[entry];
7777 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize an skb whose mapping tripped a hardware DMA bug: copy it
 * into a fresh skb (with 4-byte re-alignment on 5701), map the copy,
 * and re-emit it as a single BD chain.  On success the original skb
 * is consumed and *pskb points at the copy.
 *
 * NOTE(review): numbering gaps elide lines (the ret local, failure
 * assignments, and the trailing `*pskb = new_skb; return ret;`).
 */
7778 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7779 struct sk_buff **pskb,
7780 u32 *entry, u32 *budget,
7781 u32 base_flags, u32 mss, u32 vlan)
7783 struct tg3 *tp = tnapi->tp;
7784 struct sk_buff *new_skb, *skb = *pskb;
7785 dma_addr_t new_addr = 0;
/* 5701 needs the payload re-aligned to a 4-byte boundary. */
7788 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7789 new_skb = skb_copy(skb, GFP_ATOMIC);
7791 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7793 new_skb = skb_copy_expand(skb,
7794 skb_headroom(skb) + more_headroom,
7795 skb_tailroom(skb), GFP_ATOMIC);
7801 /* New SKB is guaranteed to be linear. */
7802 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7803 new_skb->len, DMA_TO_DEVICE);
7804 /* Make sure the mapping succeeded */
7805 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7806 dev_kfree_skb_any(new_skb);
7809 u32 save_entry = *entry;
/* The linear copy is emitted as one chain ending here. */
7811 base_flags |= TXD_FLAG_END;
7813 tnapi->tx_buffers[*entry].skb = new_skb;
7814 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
/* If even the copy trips a DMA bug, give up and unwind. */
7817 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7818 new_skb->len, base_flags,
7820 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7821 dev_kfree_skb_any(new_skb);
/* Original skb is no longer needed; the copy carries the data. */
7827 dev_consume_skb_any(skb);
7832 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7834 /* Check if we will never have enough descriptors,
7835 * as gso_segs can be more than current ring size
7837 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7842 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7843 * indicated in tg3_tx_frag_set()
/* Software fallback for TSO packets that would trip the hardware TSO
 * bugs: segment the skb with the GSO layer and transmit each segment
 * through the normal tg3_start_xmit() path.  Stops the queue first if
 * the worst-case descriptor estimate does not fit.
 *
 * NOTE(review): numbering gaps elide lines (the memory barrier around
 * 7860, the tg3_tso_bug_end label, and the segment-walk braces).
 */
7845 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7846 struct netdev_queue *txq, struct sk_buff *skb)
/* Worst case: ~3 descriptors per GSO segment. */
7848 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7849 struct sk_buff *segs, *seg, *next;
7851 /* Estimate the number of fragments in the worst case */
7852 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7853 netif_tx_stop_queue(txq);
7855 /* netif_tx_stop_queue() must be done before checking
7856 * checking tx index in tg3_tx_avail() below, because in
7857 * tg3_tx(), we update tx index before checking for
7858 * netif_tx_queue_stopped().
7861 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7862 return NETDEV_TX_BUSY;
/* Room freed up between the checks: restart the queue and proceed. */
7864 netif_tx_wake_queue(txq);
/* Let the GSO layer do the segmentation the hardware cannot. */
7867 segs = skb_gso_segment(skb, tp->dev->features &
7868 ~(NETIF_F_TSO | NETIF_F_TSO6));
7869 if (IS_ERR(segs) || !segs)
7870 goto tg3_tso_bug_end;
7872 skb_list_walk_safe(segs, seg, next) {
7873 skb_mark_not_on_list(seg);
7874 tg3_start_xmit(seg, tp->dev);
/* Original skb has been fully replaced by its segments. */
7878 dev_consume_skb_any(skb);
7880 return NETDEV_TX_OK;
7883 /* hard_start_xmit for all devices */
7884 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7886 struct tg3 *tp = netdev_priv(dev);
7887 u32 len, entry, base_flags, mss, vlan = 0;
7889 int i = -1, would_hit_hwbug;
7891 struct tg3_napi *tnapi;
7892 struct netdev_queue *txq;
7894 struct iphdr *iph = NULL;
7895 struct tcphdr *tcph = NULL;
7896 __sum16 tcp_csum = 0, ip_csum = 0;
7897 __be16 ip_tot_len = 0;
7899 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7900 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7901 if (tg3_flag(tp, ENABLE_TSS))
7904 budget = tg3_tx_avail(tnapi);
7906 /* We are running in BH disabled context with netif_tx_lock
7907 * and TX reclaim runs via tp->napi.poll inside of a software
7908 * interrupt. Furthermore, IRQ processing runs lockless so we have
7909 * no IRQ context deadlocks to worry about either. Rejoice!
7911 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7912 if (!netif_tx_queue_stopped(txq)) {
7913 netif_tx_stop_queue(txq);
7915 /* This is a hard error, log it. */
7917 "BUG! Tx Ring full when queue awake!\n");
7919 return NETDEV_TX_BUSY;
7922 entry = tnapi->tx_prod;
7925 mss = skb_shinfo(skb)->gso_size;
7927 u32 tcp_opt_len, hdr_len;
7929 if (skb_cow_head(skb, 0))
7933 tcp_opt_len = tcp_optlen(skb);
7935 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7937 /* HW/FW can not correctly segment packets that have been
7938 * vlan encapsulated.
7940 if (skb->protocol == htons(ETH_P_8021Q) ||
7941 skb->protocol == htons(ETH_P_8021AD)) {
7942 if (tg3_tso_bug_gso_check(tnapi, skb))
7943 return tg3_tso_bug(tp, tnapi, txq, skb);
7947 if (!skb_is_gso_v6(skb)) {
7948 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7949 tg3_flag(tp, TSO_BUG)) {
7950 if (tg3_tso_bug_gso_check(tnapi, skb))
7951 return tg3_tso_bug(tp, tnapi, txq, skb);
7954 ip_csum = iph->check;
7955 ip_tot_len = iph->tot_len;
7957 iph->tot_len = htons(mss + hdr_len);
7960 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7961 TXD_FLAG_CPU_POST_DMA);
7963 tcph = tcp_hdr(skb);
7964 tcp_csum = tcph->check;
7966 if (tg3_flag(tp, HW_TSO_1) ||
7967 tg3_flag(tp, HW_TSO_2) ||
7968 tg3_flag(tp, HW_TSO_3)) {
7970 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7972 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7976 if (tg3_flag(tp, HW_TSO_3)) {
7977 mss |= (hdr_len & 0xc) << 12;
7979 base_flags |= 0x00000010;
7980 base_flags |= (hdr_len & 0x3e0) << 5;
7981 } else if (tg3_flag(tp, HW_TSO_2))
7982 mss |= hdr_len << 9;
7983 else if (tg3_flag(tp, HW_TSO_1) ||
7984 tg3_asic_rev(tp) == ASIC_REV_5705) {
7985 if (tcp_opt_len || iph->ihl > 5) {
7988 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7989 mss |= (tsflags << 11);
7992 if (tcp_opt_len || iph->ihl > 5) {
7995 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7996 base_flags |= tsflags << 12;
7999 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8000 /* HW/FW can not correctly checksum packets that have been
8001 * vlan encapsulated.
8003 if (skb->protocol == htons(ETH_P_8021Q) ||
8004 skb->protocol == htons(ETH_P_8021AD)) {
8005 if (skb_checksum_help(skb))
8008 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8012 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8013 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8014 base_flags |= TXD_FLAG_JMB_PKT;
8016 if (skb_vlan_tag_present(skb)) {
8017 base_flags |= TXD_FLAG_VLAN;
8018 vlan = skb_vlan_tag_get(skb);
8021 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8022 tg3_flag(tp, TX_TSTAMP_EN)) {
8023 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8024 base_flags |= TXD_FLAG_HWTSTAMP;
8027 len = skb_headlen(skb);
8029 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8031 if (dma_mapping_error(&tp->pdev->dev, mapping))
8035 tnapi->tx_buffers[entry].skb = skb;
8036 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8038 would_hit_hwbug = 0;
8040 if (tg3_flag(tp, 5701_DMA_BUG))
8041 would_hit_hwbug = 1;
8043 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8044 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8046 would_hit_hwbug = 1;
8047 } else if (skb_shinfo(skb)->nr_frags > 0) {
8050 if (!tg3_flag(tp, HW_TSO_1) &&
8051 !tg3_flag(tp, HW_TSO_2) &&
8052 !tg3_flag(tp, HW_TSO_3))
8055 /* Now loop through additional data
8056 * fragments, and queue them.
8058 last = skb_shinfo(skb)->nr_frags - 1;
8059 for (i = 0; i <= last; i++) {
8060 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8062 len = skb_frag_size(frag);
8063 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8064 len, DMA_TO_DEVICE);
8066 tnapi->tx_buffers[entry].skb = NULL;
8067 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8069 if (dma_mapping_error(&tp->pdev->dev, mapping))
8073 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8075 ((i == last) ? TXD_FLAG_END : 0),
8077 would_hit_hwbug = 1;
8083 if (would_hit_hwbug) {
8084 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8086 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8087 /* If it's a TSO packet, do GSO instead of
8088 * allocating and copying to a large linear SKB
8091 iph->check = ip_csum;
8092 iph->tot_len = ip_tot_len;
8094 tcph->check = tcp_csum;
8095 return tg3_tso_bug(tp, tnapi, txq, skb);
8098 /* If the workaround fails due to memory/mapping
8099 * failure, silently drop this packet.
8101 entry = tnapi->tx_prod;
8102 budget = tg3_tx_avail(tnapi);
8103 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8104 base_flags, mss, vlan))
8108 skb_tx_timestamp(skb);
8109 netdev_tx_sent_queue(txq, skb->len);
8111 /* Sync BD data before updating mailbox */
8114 tnapi->tx_prod = entry;
8115 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8116 netif_tx_stop_queue(txq);
8118 /* netif_tx_stop_queue() must be done before checking
8119 * checking tx index in tg3_tx_avail() below, because in
8120 * tg3_tx(), we update tx index before checking for
8121 * netif_tx_queue_stopped().
8124 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8125 netif_tx_wake_queue(txq);
8128 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8129 /* Packets are ready, update Tx producer idx on card. */
8130 tw32_tx_mbox(tnapi->prodmbox, entry);
8133 return NETDEV_TX_OK;
8136 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8137 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8139 dev_kfree_skb_any(skb);
8142 return NETDEV_TX_OK;
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode and
 * writing it back to the MAC_MODE register.
 * NOTE(review): this extract is missing lines (the enable/disable branch
 * braces are not visible); comments below describe only the visible code.
 */
8145 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8148 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8149 MAC_MODE_PORT_MODE_MASK);
/* Enable path: turn on the internal loopback bit. */
8151 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8153 if (!tg3_flag(tp, 5705_PLUS))
8154 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
/* Select MII port mode for 10/100-only PHYs, otherwise GMII. */
8156 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8157 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8159 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
/* Disable path: clear the loopback bit and the polarity override. */
8161 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8163 if (tg3_flag(tp, 5705_PLUS) ||
8164 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8165 tg3_asic_rev(tp) == ASIC_REV_5700)
8166 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
/* Commit the new mode to hardware. */
8169 tw32(MAC_MODE, tp->mac_mode);
/* Put the PHY into loopback at the requested speed.
 * @speed:   SPEED_100 / SPEED_1000 (exact set not fully visible here)
 * @extlpbk: also enable external loopback via tg3_phy_set_extloopbk()
 * Programs BMCR for loopback, applies FET-specific workarounds, then
 * derives and writes a matching MAC_MODE value.
 * NOTE(review): several lines (returns, delays, branch braces) are not
 * visible in this extract.
 */
8173 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8175 u32 val, bmcr, mac_mode, ptest = 0;
/* Loopback is incompatible with APD and auto-MDIX; turn both off. */
8177 tg3_phy_toggle_apd(tp, false);
8178 tg3_phy_toggle_automdix(tp, false);
8180 if (extlpbk && tg3_phy_set_extloopbk(tp))
8183 bmcr = BMCR_FULLDPLX;
8188 bmcr |= BMCR_SPEED100;
/* FET PHYs only do 100 Mb/s here; others get 1000 Mb/s. */
8192 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8194 bmcr |= BMCR_SPEED100;
8197 bmcr |= BMCR_SPEED1000;
/* Non-FET gigabit PHYs must be forced to master for loopback. */
8202 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8203 tg3_readphy(tp, MII_CTRL1000, &val);
8204 val |= CTL1000_AS_MASTER |
8205 CTL1000_ENABLE_MASTER;
8206 tg3_writephy(tp, MII_CTRL1000, val);
8208 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8209 MII_TG3_FET_PTEST_TRIM_2;
8210 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8213 bmcr |= BMCR_LOOPBACK;
8215 tg3_writephy(tp, MII_BMCR, bmcr);
8217 /* The write needs to be flushed for the FETs */
8218 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8219 tg3_readphy(tp, MII_BMCR, &bmcr);
/* 5785 FET: force TX link/lock indications while looped back. */
8223 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8224 tg3_asic_rev(tp) == ASIC_REV_5785) {
8225 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8226 MII_TG3_FET_PTEST_FRC_TX_LINK |
8227 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8229 /* The write needs to be flushed for the AC131 */
8230 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8233 /* Reset to prevent losing 1st rx packet intermittently */
8234 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8235 tg3_flag(tp, 5780_CLASS)) {
8236 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8238 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Build MAC_MODE to match the loopback speed chosen above. */
8241 mac_mode = tp->mac_mode &
8242 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8243 if (speed == SPEED_1000)
8244 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8246 mac_mode |= MAC_MODE_PORT_MODE_MII;
/* 5700: per-PHY link polarity quirks and LED mode selection. */
8248 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8249 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8251 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8252 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8253 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8254 mac_mode |= MAC_MODE_LINK_POLARITY;
8256 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8257 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8260 tw32(MAC_MODE, mac_mode);
/* ethtool NETIF_F_LOOPBACK handler: enter or leave internal MAC loopback
 * under tp->lock. Early-outs (lines not visible here) appear to skip the
 * work when the requested state already matches MAC_MODE_PORT_INT_LPBACK.
 */
8266 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8268 struct tg3 *tp = netdev_priv(dev);
8270 if (features & NETIF_F_LOOPBACK) {
/* Already in loopback — nothing to do. */
8271 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8274 spin_lock_bh(&tp->lock);
8275 tg3_mac_loopback(tp, true);
/* Fake carrier so the stack will transmit into the loopback. */
8276 netif_carrier_on(tp->dev);
8277 spin_unlock_bh(&tp->lock);
8278 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8280 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8283 spin_lock_bh(&tp->lock);
8284 tg3_mac_loopback(tp, false);
8285 /* Force link status check */
8286 tg3_setup_phy(tp, true);
8287 spin_unlock_bh(&tp->lock);
8288 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* .ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU,
 * so strip all TSO feature bits in that configuration.
 */
8292 static netdev_features_t tg3_fix_features(struct net_device *dev,
8293 netdev_features_t features)
8295 struct tg3 *tp = netdev_priv(dev);
8297 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8298 features &= ~NETIF_F_ALL_TSO;
/* .ndo_set_features: only NETIF_F_LOOPBACK transitions need action here,
 * and only while the interface is running.
 */
8303 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8305 netdev_features_t changed = dev->features ^ features;
8307 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8308 tg3_set_loopback(dev, features);
/* Free all rx data buffers of a producer ring set.
 * For per-vector rings (not the primary &tp->napi[0].prodring) only the
 * cons..prod window is populated, so only that range is freed; the primary
 * ring frees every slot. Jumbo buffers are freed only on jumbo-capable
 * (and, for the full sweep, non-5780-class) chips.
 */
8313 static void tg3_rx_prodring_free(struct tg3 *tp,
8314 struct tg3_rx_prodring_set *tpr)
/* Secondary ring: walk only the occupied window, wrapping via the mask. */
8318 if (tpr != &tp->napi[0].prodring) {
8319 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8320 i = (i + 1) & tp->rx_std_ring_mask)
8321 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8324 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8325 for (i = tpr->rx_jmb_cons_idx;
8326 i != tpr->rx_jmb_prod_idx;
8327 i = (i + 1) & tp->rx_jmb_ring_mask) {
8328 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring: free every slot unconditionally. */
8336 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8337 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8340 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8341 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8342 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8347 /* Initialize rx rings for packet processing.
8349 * The chip has been shut down and the driver detached from
8350 * the networking, so no interrupts or new tx packets will
8351 * end up in the driver. tp->{tx,}lock are held and thus
8354 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8355 struct tg3_rx_prodring_set *tpr)
8357 u32 i, rx_pkt_dma_sz;
/* Reset producer/consumer indices for both standard and jumbo rings. */
8359 tpr->rx_std_cons_idx = 0;
8360 tpr->rx_std_prod_idx = 0;
8361 tpr->rx_jmb_cons_idx = 0;
8362 tpr->rx_jmb_prod_idx = 0;
/* Secondary (per-vector) ring sets only need their buffer bookkeeping
 * arrays cleared — the hw descriptors live in the primary set.
 */
8364 if (tpr != &tp->napi[0].prodring) {
8365 memset(&tpr->rx_std_buffers[0], 0,
8366 TG3_RX_STD_BUFF_RING_SIZE(tp));
8367 if (tpr->rx_jmb_buffers)
8368 memset(&tpr->rx_jmb_buffers[0], 0,
8369 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8373 /* Zero out all descriptors. */
8374 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
/* 5780-class with jumbo MTU uses the jumbo DMA size on the std ring. */
8376 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8377 if (tg3_flag(tp, 5780_CLASS) &&
8378 tp->dev->mtu > ETH_DATA_LEN)
8379 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8380 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8382 /* Initialize invariants of the rings, we only set this
8383 * stuff once. This works because the card does not
8384 * write into the rx buffer posting rings.
8386 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8387 struct tg3_rx_buffer_desc *rxd;
8389 rxd = &tpr->rx_std[i];
8390 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8391 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8392 rxd->opaque = (RXD_OPAQUE_RING_STD |
8393 (i << RXD_OPAQUE_INDEX_SHIFT));
8396 /* Now allocate fresh SKBs for each rx ring. */
8397 for (i = 0; i < tp->rx_pending; i++) {
8398 unsigned int frag_size;
/* On allocation failure, shrink the ring to what we managed to fill. */
8400 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8402 netdev_warn(tp->dev,
8403 "Using a smaller RX standard ring. Only "
8404 "%d out of %d buffers were allocated "
8405 "successfully\n", i, tp->rx_pending);
/* No jumbo ring on non-jumbo or 5780-class hardware. */
8413 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8416 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8418 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8421 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8422 struct tg3_rx_buffer_desc *rxd;
8424 rxd = &tpr->rx_jmb[i].std;
8425 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8426 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8428 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8429 (i << RXD_OPAQUE_INDEX_SHIFT));
8432 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8433 unsigned int frag_size;
8435 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8437 netdev_warn(tp->dev,
8438 "Using a smaller RX jumbo ring. Only %d "
8439 "out of %d buffers were allocated "
8440 "successfully\n", i, tp->rx_jumbo_pending);
8443 tp->rx_jumbo_pending = i;
/* Error path: release everything allocated so far. */
8452 tg3_rx_prodring_free(tp, tpr);
/* Tear down a producer ring set: free the buffer bookkeeping arrays and
 * the DMA-coherent descriptor rings. Counterpart of tg3_rx_prodring_init().
 */
8456 static void tg3_rx_prodring_fini(struct tg3 *tp,
8457 struct tg3_rx_prodring_set *tpr)
8459 kfree(tpr->rx_std_buffers);
8460 tpr->rx_std_buffers = NULL;
8461 kfree(tpr->rx_jmb_buffers);
8462 tpr->rx_jmb_buffers = NULL;
8464 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8465 tpr->rx_std, tpr->rx_std_mapping);
8469 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8470 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer ring set: kzalloc'd per-slot bookkeeping arrays plus
 * DMA-coherent descriptor rings. Jumbo resources are allocated only on
 * jumbo-capable, non-5780-class chips. On failure (error-path lines not
 * visible here) everything is released via tg3_rx_prodring_fini().
 */
8475 static int tg3_rx_prodring_init(struct tg3 *tp,
8476 struct tg3_rx_prodring_set *tpr)
8478 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8480 if (!tpr->rx_std_buffers)
8483 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8484 TG3_RX_STD_RING_BYTES(tp),
8485 &tpr->rx_std_mapping,
8490 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8491 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8493 if (!tpr->rx_jmb_buffers)
8496 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8497 TG3_RX_JMB_RING_BYTES(tp),
8498 &tpr->rx_jmb_mapping,
/* Error path: undo partial allocations. */
8507 tg3_rx_prodring_fini(tp, tpr);
8511 /* Free up pending packets in all rx/tx rings.
8513 * The chip has been shut down and the driver detached from
8514 * the networking, so no interrupts or new tx packets will
8515 * end up in the driver. tp->{tx,}lock is not held and we are not
8516 * in an interrupt context and thus may sleep.
8518 static void tg3_free_rings(struct tg3 *tp)
/* Walk every interrupt vector: drop its rx producer buffers, then unmap
 * and consume any skb still parked in its tx ring.
 */
8522 for (j = 0; j < tp->irq_cnt; j++) {
8523 struct tg3_napi *tnapi = &tp->napi[j];
8525 tg3_rx_prodring_free(tp, &tnapi->prodring);
/* Vector has no tx ring (e.g. resources never allocated) — skip. */
8527 if (!tnapi->tx_buffers)
8530 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8531 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8536 tg3_tx_skb_unmap(tnapi, i,
8537 skb_shinfo(skb)->nr_frags - 1);
8539 dev_consume_skb_any(skb);
/* Reset BQL accounting for this tx queue. */
8541 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8545 /* Initialize tx/rx rings for packet processing.
8547 * The chip has been shut down and the driver detached from
8548 * the networking, so no interrupts or new tx packets will
8549 * end up in the driver. tp->{tx,}lock are held and thus
8552 static int tg3_init_rings(struct tg3 *tp)
8556 /* Free up all the SKBs. */
/* Per vector: clear status block, tags, tx/rcb descriptor memory, then
 * (re)allocate the rx producer ring buffers.
 */
8559 for (i = 0; i < tp->irq_cnt; i++) {
8560 struct tg3_napi *tnapi = &tp->napi[i];
8562 tnapi->last_tag = 0;
8563 tnapi->last_irq_tag = 0;
8564 tnapi->hw_status->status = 0;
8565 tnapi->hw_status->status_tag = 0;
8566 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8571 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8573 tnapi->rx_rcb_ptr = 0;
8575 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Failure here aborts ring init (error handling lines not visible). */
8577 if (tnapi->prodring.rx_std &&
8578 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
/* Release tx resources for every possible vector (irq_max, not irq_cnt,
 * so partially-initialized configurations are covered too): the
 * DMA-coherent tx descriptor ring and the tx buffer bookkeeping array.
 */
8587 static void tg3_mem_tx_release(struct tg3 *tp)
8591 for (i = 0; i < tp->irq_max; i++) {
8592 struct tg3_napi *tnapi = &tp->napi[i];
8594 if (tnapi->tx_ring) {
8595 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8596 tnapi->tx_ring, tnapi->tx_desc_mapping);
8597 tnapi->tx_ring = NULL;
8600 kfree(tnapi->tx_buffers);
8601 tnapi->tx_buffers = NULL;
/* Allocate per-queue tx resources: a tx_buffers bookkeeping array and a
 * DMA-coherent descriptor ring for each of tp->txq_cnt queues. On any
 * failure, everything is unwound via tg3_mem_tx_release().
 */
8605 static int tg3_mem_tx_acquire(struct tg3 *tp)
8608 struct tg3_napi *tnapi = &tp->napi[0];
8610 /* If multivector TSS is enabled, vector 0 does not handle
8611 * tx interrupts. Don't allocate any resources for it.
8613 if (tg3_flag(tp, ENABLE_TSS))
8616 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8617 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8618 sizeof(struct tg3_tx_ring_info),
8620 if (!tnapi->tx_buffers)
8623 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8625 &tnapi->tx_desc_mapping,
8627 if (!tnapi->tx_ring)
/* Error path: free whatever was allocated so far. */
8634 tg3_mem_tx_release(tp);
/* Release rx resources for every possible vector: the producer ring set
 * and the DMA-coherent rx return (rcb) ring.
 */
8638 static void tg3_mem_rx_release(struct tg3 *tp)
8642 for (i = 0; i < tp->irq_max; i++) {
8643 struct tg3_napi *tnapi = &tp->napi[i];
8645 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8650 dma_free_coherent(&tp->pdev->dev,
8651 TG3_RX_RCB_RING_BYTES(tp),
8653 tnapi->rx_rcb_mapping);
8654 tnapi->rx_rcb = NULL;
/* Allocate per-vector rx resources: a producer ring set for each of
 * `limit` vectors plus a DMA-coherent rx return ring for the ones that
 * actually handle rx. Unwound on failure via tg3_mem_rx_release().
 */
8658 static int tg3_mem_rx_acquire(struct tg3 *tp)
8660 unsigned int i, limit;
8662 limit = tp->rxq_cnt;
8664 /* If RSS is enabled, we need a (dummy) producer ring
8665 * set on vector zero. This is the true hw prodring.
8667 if (tg3_flag(tp, ENABLE_RSS))
8670 for (i = 0; i < limit; i++) {
8671 struct tg3_napi *tnapi = &tp->napi[i];
8673 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8676 /* If multivector RSS is enabled, vector 0
8677 * does not handle rx or tx interrupts.
8678 * Don't allocate any resources for it.
8680 if (!i && tg3_flag(tp, ENABLE_RSS))
8683 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8684 TG3_RX_RCB_RING_BYTES(tp),
8685 &tnapi->rx_rcb_mapping,
/* Error path: release partial allocations. */
8694 tg3_mem_rx_release(tp);
/* Free all DMA-consistent driver state: per-vector status blocks, the
 * rx/tx ring memory, and the hardware statistics block.
 */
8699 * Must not be invoked with interrupt sources disabled and
8700 * the hardware shutdown down.
8702 static void tg3_free_consistent(struct tg3 *tp)
8706 for (i = 0; i < tp->irq_cnt; i++) {
8707 struct tg3_napi *tnapi = &tp->napi[i];
8709 if (tnapi->hw_status) {
8710 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8712 tnapi->status_mapping);
8713 tnapi->hw_status = NULL;
8717 tg3_mem_rx_release(tp);
8718 tg3_mem_tx_release(tp);
8720 /* tp->hw_stats can be referenced safely:
8721 * 1. under rtnl_lock
8722 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8725 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8726 tp->hw_stats, tp->stats_mapping);
8727 tp->hw_stats = NULL;
/* Allocate all DMA-consistent driver state: the hw stats block, one status
 * block per vector, and (via the tx/rx acquire helpers) ring memory.
 * Frees everything already obtained on failure.
 */
8732 * Must not be invoked with interrupt sources disabled and
8733 * the hardware shutdown down. Can sleep.
8735 static int tg3_alloc_consistent(struct tg3 *tp)
8739 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8740 sizeof(struct tg3_hw_stats),
8741 &tp->stats_mapping, GFP_KERNEL);
8745 for (i = 0; i < tp->irq_cnt; i++) {
8746 struct tg3_napi *tnapi = &tp->napi[i];
8747 struct tg3_hw_status *sblk;
8749 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8751 &tnapi->status_mapping,
8753 if (!tnapi->hw_status)
8756 sblk = tnapi->hw_status;
8758 if (tg3_flag(tp, ENABLE_RSS)) {
8759 u16 *prodptr = NULL;
8762 * When RSS is enabled, the status block format changes
8763 * slightly. The "rx_jumbo_consumer", "reserved",
8764 * and "rx_mini_consumer" members get mapped to the
8765 * other three rx return ring producer indexes.
/* Pick the producer index field that this vector's rcb uses;
 * the selecting switch/case lines are not visible in this extract.
 */
8769 prodptr = &sblk->idx[0].rx_producer;
8772 prodptr = &sblk->rx_jumbo_consumer;
8775 prodptr = &sblk->reserved;
8778 prodptr = &sblk->rx_mini_consumer;
8781 tnapi->rx_rcb_prod_idx = prodptr;
8783 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8787 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
/* Error path: release everything allocated so far. */
8793 tg3_free_consistent(tp);
8797 #define MAX_WAIT_CNT 1000
8799 /* To stop a block, clear the enable bit and poll till it
8800 * clears. tp->lock is held.
8802 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
/* 5705-class chips fake success for blocks that cannot be toggled. */
8807 if (tg3_flag(tp, 5705_PLUS)) {
8814 /* We can't enable/disable these bits of the
8815 * 5705/5750, just say success.
/* Poll up to MAX_WAIT_CNT times for the enable bit to clear, bailing
 * out early if the PCI device has dropped off the bus.
 */
8828 for (i = 0; i < MAX_WAIT_CNT; i++) {
8829 if (pci_channel_offline(tp->pdev)) {
8830 dev_err(&tp->pdev->dev,
8831 "tg3_stop_block device offline, "
8832 "ofs=%lx enable_bit=%x\n",
8839 if ((val & enable_bit) == 0)
/* Timed out: report unless the caller asked for silence. */
8843 if (i == MAX_WAIT_CNT && !silent) {
8844 dev_err(&tp->pdev->dev,
8845 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8853 /* tp->lock is held. */
/* Quiesce the NIC: disable interrupts, stop the rx path, shut down every
 * DMA/send/receive functional block in dependency order, stop the tx MAC,
 * reset the FTQs, and finally clear all status blocks. Errors from the
 * individual block stops are OR-ed into the return value.
 */
8854 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8858 tg3_disable_ints(tp);
/* Device gone from the bus: just clear the software mode state. */
8860 if (pci_channel_offline(tp->pdev)) {
8861 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8862 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8867 tp->rx_mode &= ~RX_MODE_ENABLE;
8868 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop receive-side blocks first. */
8871 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8872 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8873 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8874 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8875 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8876 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
/* Then the send-side and DMA blocks. */
8878 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8886 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8887 tw32_f(MAC_MODE, tp->mac_mode);
8890 tp->tx_mode &= ~TX_MODE_ENABLE;
8891 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll for the tx MAC to acknowledge the disable. */
8893 for (i = 0; i < MAX_WAIT_CNT; i++) {
8895 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8898 if (i >= MAX_WAIT_CNT) {
8899 dev_err(&tp->pdev->dev,
8900 "%s timed out, TX_MODE_ENABLE will not clear "
8901 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8905 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8906 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8907 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the FTQ reset register to flush all queues. */
8909 tw32(FTQ_RESET, 0xffffffff);
8910 tw32(FTQ_RESET, 0x00000000);
8912 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8913 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
/* Wipe every vector's status block so stale state can't be consumed. */
8916 for (i = 0; i < tp->irq_cnt; i++) {
8917 struct tg3_napi *tnapi = &tp->napi[i];
8918 if (tnapi->hw_status)
8919 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8925 /* Save PCI command register before chip reset */
8926 static void tg3_save_pci_state(struct tg3 *tp)
8928 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8931 /* Restore PCI state after chip reset */
/* Re-programs config space that GRC core-clock reset clobbers: indirect
 * access enable, PCISTATE, the saved PCI_COMMAND word, cache line size /
 * latency timer (conventional PCI only), PCI-X relaxed ordering, and the
 * MSI enable bit on 5780-class parts.
 */
8932 static void tg3_restore_pci_state(struct tg3 *tp)
8936 /* Re-enable indirect register accesses. */
8937 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8938 tp->misc_host_ctrl);
8940 /* Set MAX PCI retry to zero. */
8941 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8942 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8943 tg3_flag(tp, PCIX_MODE))
8944 val |= PCISTATE_RETRY_SAME_DMA;
8945 /* Allow reads and writes to the APE register and memory space. */
8946 if (tg3_flag(tp, ENABLE_APE))
8947 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8948 PCISTATE_ALLOW_APE_SHMEM_WR |
8949 PCISTATE_ALLOW_APE_PSPACE_WR;
8950 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Restore the command word captured by tg3_save_pci_state(). */
8952 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8954 if (!tg3_flag(tp, PCI_EXPRESS)) {
8955 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8956 tp->pci_cacheline_sz);
8957 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8961 /* Make sure PCI-X relaxed ordering bit is clear. */
8962 if (tg3_flag(tp, PCIX_MODE)) {
8965 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8967 pcix_cmd &= ~PCI_X_CMD_ERO;
8968 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8972 if (tg3_flag(tp, 5780_CLASS)) {
8974 /* Chip reset on 5780 will reset MSI enable bit,
8975 * so need to restore it.
8977 if (tg3_flag(tp, USING_MSI)) {
8980 pci_read_config_word(tp->pdev,
8981 tp->msi_cap + PCI_MSI_FLAGS,
8983 pci_write_config_word(tp->pdev,
8984 tp->msi_cap + PCI_MSI_FLAGS,
8985 ctrl | PCI_MSI_FLAGS_ENABLE);
8986 val = tr32(MSGINT_MODE);
8987 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
/* Force the MAC clock override on, per ASIC revision. Paired with
 * tg3_restore_clk(). The switch case labels are not visible in this
 * extract; the two register styles below apply to different ASIC revs.
 */
8992 static void tg3_override_clk(struct tg3 *tp)
8996 switch (tg3_asic_rev(tp)) {
8998 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8999 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9000 TG3_CPMU_MAC_ORIDE_ENABLE);
9005 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
/* Undo tg3_override_clk(): clear the MAC clock override bit in whichever
 * CPMU register the ASIC revision uses. Case labels not visible here.
 */
9013 static void tg3_restore_clk(struct tg3 *tp)
9017 switch (tg3_asic_rev(tp)) {
9019 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9020 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9021 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9026 val = tr32(TG3_CPMU_CLCK_ORIDE);
9027 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9035 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back to a
 * sane post-reset state: quiesce irq handlers, issue the reset, restore
 * saved PCI config space, wait for bootcode, reinstate MAC/clock modes and
 * re-probe ASF configuration from NVRAM shadow memory. Drops and
 * re-acquires tp->lock around synchronize_irq() (see sparse annotations).
 * NOTE(review): many original lines (delays, returns, braces) are missing
 * from this extract; comments describe only the visible statements.
 */
9036 static int tg3_chip_reset(struct tg3 *tp)
9037 __releases(tp->lock)
9038 __acquires(tp->lock)
9041 void (*write_op)(struct tg3 *, u32, u32);
9044 if (!pci_device_is_present(tp->pdev))
9049 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9051 /* No matching tg3_nvram_unlock() after this because
9052 * chip reset below will undo the nvram lock.
9054 tp->nvram_lock_cnt = 0;
9056 /* GRC_MISC_CFG core clock reset will clear the memory
9057 * enable bit in PCI register 4 and the MSI enable bit
9058 * on some chips, so we save relevant registers here.
9060 tg3_save_pci_state(tp);
9062 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9063 tg3_flag(tp, 5755_PLUS))
9064 tw32(GRC_FASTBOOT_PC, 0);
9067 * We must avoid the readl() that normally takes place.
9068 * It locks machines, causes machine checks, and other
9069 * fun things. So, temporarily disable the 5701
9070 * hardware workaround, while we do the reset.
9072 write_op = tp->write32;
9073 if (write_op == tg3_write_flush_reg32)
9074 tp->write32 = tg3_write32;
9076 /* Prevent the irq handler from reading or writing PCI registers
9077 * during chip reset when the memory enable bit in the PCI command
9078 * register may be cleared. The chip does not generate interrupt
9079 * at this time, but the irq handler may still be called due to irq
9080 * sharing or irqpoll.
9082 tg3_flag_set(tp, CHIP_RESETTING);
9083 for (i = 0; i < tp->irq_cnt; i++) {
9084 struct tg3_napi *tnapi = &tp->napi[i];
9085 if (tnapi->hw_status) {
9086 tnapi->hw_status->status = 0;
9087 tnapi->hw_status->status_tag = 0;
9089 tnapi->last_tag = 0;
9090 tnapi->last_irq_tag = 0;
/* Drop the lock so in-flight irq handlers can finish before reset. */
9094 tg3_full_unlock(tp);
9096 for (i = 0; i < tp->irq_cnt; i++)
9097 synchronize_irq(tp->napi[i].irq_vec);
9099 tg3_full_lock(tp, 0);
9101 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9102 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9103 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the reset command. */
9107 val = GRC_MISC_CFG_CORECLK_RESET;
9109 if (tg3_flag(tp, PCI_EXPRESS)) {
9110 /* Force PCIe 1.0a mode */
9111 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9112 !tg3_flag(tp, 57765_PLUS) &&
9113 tr32(TG3_PCIE_PHY_TSTCTL) ==
9114 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9115 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9117 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9118 tw32(GRC_MISC_CFG, (1 << 29));
9123 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9124 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9125 tw32(GRC_VCPU_EXT_CTRL,
9126 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9129 /* Set the clock to the highest frequency to avoid timeouts. With link
9130 * aware mode, the clock speed could be slow and bootcode does not
9131 * complete within the expected time. Override the clock to allow the
9132 * bootcode to finish sooner and then restore it.
9134 tg3_override_clk(tp);
9136 /* Manage gphy power for all CPMU absent PCIe devices. */
9137 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9138 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the chip reset. */
9140 tw32(GRC_MISC_CFG, val);
9142 /* restore 5701 hardware bug workaround write method */
9143 tp->write32 = write_op;
9145 /* Unfortunately, we have to delay before the PCI read back.
9146 * Some 575X chips even will not respond to a PCI cfg access
9147 * when the reset command is given to the chip.
9149 * How do these hardware designers expect things to work
9150 * properly if the PCI write is posted for a long period
9151 * of time? It is always necessary to have some method by
9152 * which a register read back can occur to push the write
9153 * out which does the reset.
9155 * For most tg3 variants the trick below was working.
9160 /* Flush PCI posted writes. The normal MMIO registers
9161 * are inaccessible at this time so this is the only
9162 * way to make this reliably (actually, this is no longer
9163 * the case, see above). I tried to use indirect
9164 * register read/write but this upset some 5701 variants.
9166 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9170 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9173 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9177 /* Wait for link training to complete. */
9178 for (j = 0; j < 5000; j++)
9181 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9182 pci_write_config_dword(tp->pdev, 0xc4,
9183 cfg_val | (1 << 15));
9186 /* Clear the "no snoop" and "relaxed ordering" bits. */
9187 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9189 * Older PCIe devices only support the 128 byte
9190 * MPS setting. Enforce the restriction.
9192 if (!tg3_flag(tp, CPMU_PRESENT))
9193 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9194 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9196 /* Clear error status */
9197 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9198 PCI_EXP_DEVSTA_CED |
9199 PCI_EXP_DEVSTA_NFED |
9200 PCI_EXP_DEVSTA_FED |
9201 PCI_EXP_DEVSTA_URD);
/* Restore config space saved before the reset. */
9204 tg3_restore_pci_state(tp);
9206 tg3_flag_clear(tp, CHIP_RESETTING);
9207 tg3_flag_clear(tp, ERROR_PROCESSED);
9210 if (tg3_flag(tp, 5780_CLASS))
9211 val = tr32(MEMARB_MODE);
9212 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9214 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9216 tw32(0x5000, 0x400);
9219 if (tg3_flag(tp, IS_SSB_CORE)) {
9221 * BCM4785: In order to avoid repercussions from using
9222 * potentially defective internal ROM, stop the Rx RISC CPU,
9223 * which is not required.
9226 tg3_halt_cpu(tp, RX_CPU_BASE);
/* Wait for bootcode to finish. */
9229 err = tg3_poll_fw(tp);
9233 tw32(GRC_MODE, tp->grc_mode);
9235 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9238 tw32(0xc4, val | (1 << 15));
9241 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9242 tg3_asic_rev(tp) == ASIC_REV_5705) {
9243 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9244 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9245 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9246 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Select port mode from PHY type: TBI for serdes, GMII for MII-serdes. */
9249 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9250 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9252 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9253 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9258 tw32_f(MAC_MODE, val);
9261 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9265 if (tg3_flag(tp, PCI_EXPRESS) &&
9266 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9267 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9268 !tg3_flag(tp, 57765_PLUS)) {
9271 tw32(0x7c00, val | (1 << 25));
9274 tg3_restore_clk(tp);
9276 /* Increase the core clock speed to fix tx timeout issue for 5762
9277 * with 100Mbps link speed.
9279 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9280 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9281 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9282 TG3_CPMU_MAC_ORIDE_ENABLE);
9285 /* Reprobe ASF enable state. */
9286 tg3_flag_clear(tp, ENABLE_ASF);
9287 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9288 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9290 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9291 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9292 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9295 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9296 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9297 tg3_flag_set(tp, ENABLE_ASF);
9298 tp->last_event_jiffies = jiffies;
9299 if (tg3_flag(tp, 5750_PLUS))
9300 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9302 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9303 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9304 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9305 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9306 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
/* Forward declarations needed by tg3_halt() below. */
9313 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9314 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9315 static void __tg3_set_rx_mode(struct net_device *);
9317 /* tp->lock is held. */
/* Bring the chip down: signal firmware, abort hw activity, reset the chip,
 * restore the MAC address and post-reset signatures, and snapshot the hw
 * statistics so they survive the reset.
 */
9318 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9324 tg3_write_sig_pre_reset(tp, kind);
9326 tg3_abort_hw(tp, silent);
9327 err = tg3_chip_reset(tp);
9329 __tg3_set_mac_addr(tp, false);
9331 tg3_write_sig_legacy(tp, kind);
9332 tg3_write_sig_post_reset(tp, kind);
9335 /* Save the stats across chip resets... */
9336 tg3_get_nstats(tp, &tp->net_stats_prev);
9337 tg3_get_estats(tp, &tp->estats_prev);
9339 /* And make sure the next sample is new data */
9340 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* .ndo_set_mac_address: validate and install a new MAC address. If the
 * interface is running, also reprogram the hardware address registers and
 * rx mode under tp->lock, skipping MAC address slot 1 when ASF firmware
 * has claimed it.
 */
9346 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9348 struct tg3 *tp = netdev_priv(dev);
9349 struct sockaddr *addr = p;
9351 bool skip_mac_1 = false;
9353 if (!is_valid_ether_addr(addr->sa_data))
9354 return -EADDRNOTAVAIL;
9356 eth_hw_addr_set(dev, addr->sa_data);
/* Hardware programming only matters while the device is up. */
9358 if (!netif_running(dev))
9361 if (tg3_flag(tp, ENABLE_ASF)) {
9362 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9364 addr0_high = tr32(MAC_ADDR_0_HIGH);
9365 addr0_low = tr32(MAC_ADDR_0_LOW);
9366 addr1_high = tr32(MAC_ADDR_1_HIGH);
9367 addr1_low = tr32(MAC_ADDR_1_LOW);
9369 /* Skip MAC addr 1 if ASF is using it. */
9370 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9371 !(addr1_high == 0 && addr1_low == 0))
9374 spin_lock_bh(&tp->lock);
9375 __tg3_set_mac_addr(tp, skip_mac_1);
9376 __tg3_set_rx_mode(dev);
9377 spin_unlock_bh(&tp->lock);
9382 /* tp->lock is held. */
/* Write one buffer-descriptor-info block into NIC memory: 64-bit host
 * DMA address (split into high/low 32-bit halves), maxlen/flags word, and
 * — on pre-5705 chips only — the NIC-local ring address.
 */
9383 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9384 dma_addr_t mapping, u32 maxlen_flags,
9388 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9389 ((u64) mapping >> 32));
9391 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9392 ((u64) mapping & 0xffffffff));
9394 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9397 if (!tg3_flag(tp, 5705_PLUS))
9399 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Program tx interrupt coalescing. Without TSS the global HOSTCC tx
 * registers carry the settings; with TSS they are zeroed and each tx
 * queue's per-vector register bank (0x18 apart) is programmed instead.
 * Unused vector banks are cleared.
 */
9404 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9408 if (!tg3_flag(tp, ENABLE_TSS)) {
9409 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9410 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9411 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9413 tw32(HOSTCC_TXCOL_TICKS, 0);
9414 tw32(HOSTCC_TXMAX_FRAMES, 0);
9415 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
/* Per-queue vector registers for active tx queues. */
9417 for (; i < tp->txq_cnt; i++) {
9420 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9421 tw32(reg, ec->tx_coalesce_usecs);
9422 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9423 tw32(reg, ec->tx_max_coalesced_frames);
9424 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9425 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the remaining (unused) vector banks. */
9429 for (; i < tp->irq_max - 1; i++) {
9430 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9431 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9432 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/*
 * tg3_coal_rx_init() - RX counterpart of tg3_coal_tx_init(): apply
 * ethtool RX coalescing parameters.  Without RSS only the default
 * (vector 0) registers are programmed; with RSS each of the first
 * 'limit' (= rxq_cnt) queues gets its own VEC1+ bank (stride 0x18),
 * and the remaining vectors up to irq_max - 1 are zeroed.
 * NOTE(review): listing is elided; else branches, closing braces and
 * the declaration/initialization of 'i' are not shown.
 */
9436 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9439 u32 limit = tp->rxq_cnt;
9441 if (!tg3_flag(tp, ENABLE_RSS)) {
9442 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9443 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9444 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
/* RSS path: vector-0 registers are cleared... */
9447 tw32(HOSTCC_RXCOL_TICKS, 0);
9448 tw32(HOSTCC_RXMAX_FRAMES, 0);
9449 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
/* ...and each active RX queue gets its own VEC1+ bank. */
9452 for (; i < limit; i++) {
9455 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9456 tw32(reg, ec->rx_coalesce_usecs);
9457 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9458 tw32(reg, ec->rx_max_coalesced_frames);
9459 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9460 tw32(reg, ec->rx_max_coalesced_frames_irq);
/* Zero out any unused trailing vectors. */
9463 for (; i < tp->irq_max - 1; i++) {
9464 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9465 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9466 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/*
 * __tg3_set_coalesce() - push a full ethtool_coalesce configuration
 * to hardware: per-ring TX then RX setup, plus (pre-5705 chips only)
 * the IRQ-time coalescing ticks and the statistics-block interval.
 * NOTE(review): caller presumably holds tp->lock like the neighbouring
 * helpers -- confirm against callers; listing is elided (conditional
 * adjustment of 'val' before the final write is not shown).
 */
9470 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9472 tg3_coal_tx_init(tp, ec);
9473 tg3_coal_rx_init(tp, ec);
9475 if (!tg3_flag(tp, 5705_PLUS)) {
9476 u32 val = ec->stats_block_coalesce_usecs;
9478 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9479 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9484 tw32(HOSTCC_STAT_COAL_TICKS, val);
9488 /* tp->lock is held. */
/*
 * tg3_tx_rcbs_disable() - mark every NIC send ring control block
 * except the first as disabled.  The number of send RCBs present
 * depends on the chip family (16, 4, 2 or 1), so compute the SRAM
 * limit first, then stamp BDINFO_FLAGS_DISABLED into the maxlen/flags
 * word of each RCB from the second one up to that limit.
 */
9489 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9493 /* Disable all transmit rings but the first. */
9494 if (!tg3_flag(tp, 5705_PLUS))
9495 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9496 else if (tg3_flag(tp, 5717_PLUS))
9497 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9498 else if (tg3_flag(tp, 57765_CLASS) ||
9499 tg3_asic_rev(tp) == ASIC_REV_5762)
9500 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
/* Default: a single send ring; elided 'else' line sets the 1-ring limit. */
9502 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9504 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9505 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9506 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9507 BDINFO_FLAGS_DISABLED);
9510 /* tp->lock is held. */
/*
 * tg3_tx_rcbs_init() - program a send ring control block (via
 * tg3_set_bdinfo) for every napi context that owns a TX ring,
 * walking SRAM in TG3_BDINFO_SIZE steps from NIC_SRAM_SEND_RCB.
 * NOTE(review): the statement under the ENABLE_TSS check is elided --
 * presumably it adjusts the starting index/offset; confirm upstream.
 */
9511 static void tg3_tx_rcbs_init(struct tg3 *tp)
9514 u32 txrcb = NIC_SRAM_SEND_RCB;
9516 if (tg3_flag(tp, ENABLE_TSS))
9519 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9520 struct tg3_napi *tnapi = &tp->napi[i];
/* Vectors without a TX ring are skipped (elided 'continue'). */
9522 if (!tnapi->tx_ring)
9525 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9526 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9527 NIC_SRAM_TX_BUFFER_DESC);
9531 /* tp->lock is held. */
/*
 * tg3_rx_ret_rcbs_disable() - mark every receive-return ring control
 * block except the first as disabled.  The RCB count is per-family
 * (17, 16, 4 or 1); compute the SRAM limit, then stamp
 * BDINFO_FLAGS_DISABLED into each RCB from the second onward.
 */
9532 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9536 /* Disable all receive return rings but the first. */
9537 if (tg3_flag(tp, 5717_PLUS))
9538 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9539 else if (!tg3_flag(tp, 5705_PLUS))
9540 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9541 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9542 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9543 tg3_flag(tp, 57765_CLASS))
9544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
/* Default: a single return ring; elided 'else' line sets the 1-ring limit. */
9546 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9548 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9549 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9550 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9551 BDINFO_FLAGS_DISABLED);
9554 /* tp->lock is held. */
/*
 * tg3_rx_ret_rcbs_init() - program a receive-return ring control
 * block (via tg3_set_bdinfo) for each napi context, walking SRAM in
 * TG3_BDINFO_SIZE steps from NIC_SRAM_RCV_RET_RCB.  The maxlen field
 * is derived from rx_ret_ring_mask + 1 (the ring size).
 * NOTE(review): the statement under the ENABLE_RSS check and the
 * per-vector skip condition are elided from this listing.
 */
9555 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9558 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9560 if (tg3_flag(tp, ENABLE_RSS))
9563 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9564 struct tg3_napi *tnapi = &tp->napi[i];
9569 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9570 (tp->rx_ret_ring_mask + 1) <<
9571 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9575 /* tp->lock is held. */
/*
 * tg3_rings_reset() - quiesce and re-arm all host/NIC ring state:
 * disable the extra send and receive-return RCBs, ack/disable
 * interrupts via the vector-0 mailbox, zero all per-vector producer/
 * consumer mailboxes and bookkeeping counters, disable the NIC-based
 * send BD rings on pre-5705 parts, clear and re-point the status
 * block(s), then reinitialize the TX and RX-return RCBs.
 * NOTE(review): listing is elided; several closing braces, the 'else'
 * of the SUPPORT_MSIX branch, and the stblk advance inside the
 * per-vector status-block loop are not shown.
 */
9576 static void tg3_rings_reset(struct tg3 *tp)
9580 struct tg3_napi *tnapi = &tp->napi[0];
9582 tg3_tx_rcbs_disable(tp);
9584 tg3_rx_ret_rcbs_disable(tp);
9586 /* Disable interrupts */
9587 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9588 tp->napi[0].chk_msi_cnt = 0;
9589 tp->napi[0].last_rx_cons = 0;
9590 tp->napi[0].last_tx_cons = 0;
9592 /* Zero mailbox registers. */
9593 if (tg3_flag(tp, SUPPORT_MSIX)) {
9594 for (i = 1; i < tp->irq_max; i++) {
9595 tp->napi[i].tx_prod = 0;
9596 tp->napi[i].tx_cons = 0;
/* The TX producer mailbox only exists per-vector when TSS is on. */
9597 if (tg3_flag(tp, ENABLE_TSS))
9598 tw32_mailbox(tp->napi[i].prodmbox, 0);
9599 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9600 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9601 tp->napi[i].chk_msi_cnt = 0;
9602 tp->napi[i].last_rx_cons = 0;
9603 tp->napi[i].last_tx_cons = 0;
/* Without TSS, vector 0 owns the single TX producer mailbox. */
9605 if (!tg3_flag(tp, ENABLE_TSS))
9606 tw32_mailbox(tp->napi[0].prodmbox, 0);
9608 tp->napi[0].tx_prod = 0;
9609 tp->napi[0].tx_cons = 0;
9610 tw32_mailbox(tp->napi[0].prodmbox, 0);
9611 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9614 /* Make sure the NIC-based send BD rings are disabled. */
9615 if (!tg3_flag(tp, 5705_PLUS)) {
9616 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9617 for (i = 0; i < 16; i++)
9618 tw32_tx_mbox(mbox + i * 8, 0);
9621 /* Clear status block in ram. */
9622 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9624 /* Set status block DMA address */
9625 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9626 ((u64) tnapi->status_mapping >> 32));
9627 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9628 ((u64) tnapi->status_mapping & 0xffffffff));
/* Remaining vectors' status blocks live at HOSTCC_STATBLCK_RING1+. */
9630 stblk = HOSTCC_STATBLCK_RING1;
9632 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9633 u64 mapping = (u64)tnapi->status_mapping;
9634 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9635 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9638 /* Clear status block in ram. */
9639 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9642 tg3_tx_rcbs_init(tp);
9643 tg3_rx_ret_rcbs_init(tp);
/*
 * tg3_setup_rxbd_thresholds() - program RX buffer-descriptor
 * replenish thresholds.  Select the per-chip standard-ring BD cache
 * size, set the replenish threshold to the smaller of the NIC-side
 * (half the cache, capped by rx_std_max_post) and host-side
 * (rx_pending / 8, minimum 1) values, and on 57765+ parts also set a
 * low-water mark.  The same is then done for the jumbo ring, except
 * on non-jumbo-capable or 5780-class devices (elided early return).
 */
9646 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9648 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9650 if (!tg3_flag(tp, 5750_PLUS) ||
9651 tg3_flag(tp, 5780_CLASS) ||
9652 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9653 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9654 tg3_flag(tp, 57765_PLUS))
9655 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9656 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9657 tg3_asic_rev(tp) == ASIC_REV_5787)
9658 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
/* Fallback cache size (elided 'else' line). */
9660 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9662 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9663 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9665 val = min(nic_rep_thresh, host_rep_thresh);
9666 tw32(RCVBDI_STD_THRESH, val);
9668 if (tg3_flag(tp, 57765_PLUS))
9669 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* No jumbo ring on these devices -- elided 'return' follows. */
9671 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9674 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9676 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9678 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9679 tw32(RCVBDI_JUMBO_THRESH, val);
9681 if (tg3_flag(tp, 57765_PLUS))
9682 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/*
 * calc_crc() - bitwise little-endian CRC-32 over 'buf' using the
 * Ethernet polynomial (CRC32_POLY_LE); used by __tg3_set_rx_mode()
 * below to index the MAC multicast hash registers.
 * NOTE(review): listing is elided -- the accumulator initialization,
 * byte XOR-in and shift steps between the two loops are not shown.
 */
9685 static inline u32 calc_crc(unsigned char *buf, int len)
9693 for (j = 0; j < len; j++) {
9696 for (k = 0; k < 8; k++) {
/* Conditionally fold in the reflected polynomial per bit. */
9702 reg ^= CRC32_POLY_LE;
/*
 * tg3_set_multi() - set all four MAC hash filter registers to
 * all-ones (accept every multicast frame) or all-zeros (reject all),
 * depending on @accept_all.
 */
9709 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9711 /* accept or reject all multicast frames */
9712 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9713 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9714 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9715 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/*
 * __tg3_set_rx_mode() - recompute MAC_RX_MODE and the multicast hash
 * filter from dev->flags and the device's multicast/unicast address
 * lists, writing MAC_RX_MODE only when the value actually changed.
 * VLAN tag stripping is disabled (tags kept) on non-VLAN kernels
 * unless ASF manages the NIC.  NOTE(review): caller presumably holds
 * tp->lock (register writes throughout) -- confirm against callers;
 * listing is elided in several places (else branches, 'bit'
 * derivation from the CRC, loop index handling).
 */
9718 static void __tg3_set_rx_mode(struct net_device *dev)
9720 struct tg3 *tp = netdev_priv(dev);
/* Start from the current mode with promisc/VLAN-keep bits cleared. */
9723 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9724 RX_MODE_KEEP_VLAN_TAG);
9726 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9727 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9730 if (!tg3_flag(tp, ENABLE_ASF))
9731 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9734 if (dev->flags & IFF_PROMISC) {
9735 /* Promiscuous mode. */
9736 rx_mode |= RX_MODE_PROMISC;
9737 } else if (dev->flags & IFF_ALLMULTI) {
9738 /* Accept all multicast. */
9739 tg3_set_multi(tp, 1);
9740 } else if (netdev_mc_empty(dev)) {
9741 /* Reject all multicast. */
9742 tg3_set_multi(tp, 0);
9744 /* Accept one or more multicast(s). */
9745 struct netdev_hw_addr *ha;
9746 u32 mc_filter[4] = { 0, };
/* Hash each mc address into one bit of the 128-bit filter:
 * bits 5-6 of the derived value select the register, the low bits
 * select the bit (full 'bit' derivation elided from this listing). */
9751 netdev_for_each_mc_addr(ha, dev) {
9752 crc = calc_crc(ha->addr, ETH_ALEN);
9754 regidx = (bit & 0x60) >> 5;
9756 mc_filter[regidx] |= (1 << bit);
9759 tw32(MAC_HASH_REG_0, mc_filter[0]);
9760 tw32(MAC_HASH_REG_1, mc_filter[1]);
9761 tw32(MAC_HASH_REG_2, mc_filter[2]);
9762 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* More unicast addresses than perfect-filter slots: fall back to
 * promiscuous mode. */
9765 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9766 rx_mode |= RX_MODE_PROMISC;
9767 } else if (!(dev->flags & IFF_PROMISC)) {
9768 /* Add all entries into to the mac addr filter list */
9770 struct netdev_hw_addr *ha;
9772 netdev_for_each_uc_addr(ha, dev) {
9773 __tg3_set_one_mac_addr(tp, ha->addr,
9774 i + TG3_UCAST_ADDR_IDX(tp));
/* Only touch the register when the computed mode differs. */
9779 if (rx_mode != tp->rx_mode) {
9780 tp->rx_mode = rx_mode;
9781 tw32_f(MAC_RX_MODE, rx_mode);
/*
 * tg3_rss_init_dflt_indir_tbl() - fill the RSS indirection table with
 * the ethtool default round-robin spread over @qcnt RX queues.
 */
9786 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9790 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9791 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
/*
 * tg3_rss_check_indir_tbl() - sanity-check the RSS indirection table
 * against the current RX queue count.  No-op without MSI-X support
 * (elided early return); single-queue devices get a zeroed table; any
 * entry referencing a queue >= rxq_cnt causes the whole table to be
 * rebuilt with defaults.
 */
9794 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9798 if (!tg3_flag(tp, SUPPORT_MSIX))
9801 if (tp->rxq_cnt == 1) {
9802 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9806 /* Validate table against current IRQ count */
9807 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9808 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
/* Loop left early => some entry was out of range: rebuild. */
9812 if (i != TG3_RSS_INDIR_TBL_SIZE)
9813 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
/*
 * tg3_rss_write_indir_tbl() - pack the indirection table entries,
 * 8 per 32-bit register, into the MAC_RSS_INDIR_TBL_0 register bank.
 * NOTE(review): listing is elided -- the per-entry shift, the tw32()
 * write and the register-address advance are not shown.
 */
9816 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9819 u32 reg = MAC_RSS_INDIR_TBL_0;
9821 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9822 u32 val = tp->rss_ind_tbl[i];
/* Fold the next entries of this group of 8 into 'val'. */
9824 for (; i % 8; i++) {
9826 val |= tp->rss_ind_tbl[i];
/*
 * tg3_lso_rd_dma_workaround_bit() - return the ASIC-specific
 * read-DMA TX-length workaround bit: the 5719 flavour for that chip,
 * the 5720 flavour otherwise.
 */
9833 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9835 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9836 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9838 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9841 /* tp->lock is held. */
9842 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9844 u32 val, rdmac_mode;
9846 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9848 tg3_disable_ints(tp);
9852 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9854 if (tg3_flag(tp, INIT_COMPLETE))
9855 tg3_abort_hw(tp, 1);
9857 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9858 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9859 tg3_phy_pull_config(tp);
9860 tg3_eee_pull_config(tp, NULL);
9861 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9864 /* Enable MAC control of LPI */
9865 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9871 err = tg3_chip_reset(tp);
9875 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9877 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9878 val = tr32(TG3_CPMU_CTRL);
9879 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9880 tw32(TG3_CPMU_CTRL, val);
9882 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9883 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9884 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9885 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9887 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9888 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9889 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9890 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9892 val = tr32(TG3_CPMU_HST_ACC);
9893 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9894 val |= CPMU_HST_ACC_MACCLK_6_25;
9895 tw32(TG3_CPMU_HST_ACC, val);
9898 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9899 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9900 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9901 PCIE_PWR_MGMT_L1_THRESH_4MS;
9902 tw32(PCIE_PWR_MGMT_THRESH, val);
9904 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9905 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9907 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9909 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9910 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9913 if (tg3_flag(tp, L1PLLPD_EN)) {
9914 u32 grc_mode = tr32(GRC_MODE);
9916 /* Access the lower 1K of PL PCIE block registers. */
9917 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9918 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9920 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9921 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9922 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9924 tw32(GRC_MODE, grc_mode);
9927 if (tg3_flag(tp, 57765_CLASS)) {
9928 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9929 u32 grc_mode = tr32(GRC_MODE);
9931 /* Access the lower 1K of PL PCIE block registers. */
9932 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9933 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9935 val = tr32(TG3_PCIE_TLDLPL_PORT +
9936 TG3_PCIE_PL_LO_PHYCTL5);
9937 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9938 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9940 tw32(GRC_MODE, grc_mode);
9943 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9946 /* Fix transmit hangs */
9947 val = tr32(TG3_CPMU_PADRNG_CTL);
9948 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9949 tw32(TG3_CPMU_PADRNG_CTL, val);
9951 grc_mode = tr32(GRC_MODE);
9953 /* Access the lower 1K of DL PCIE block registers. */
9954 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9955 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9957 val = tr32(TG3_PCIE_TLDLPL_PORT +
9958 TG3_PCIE_DL_LO_FTSMAX);
9959 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9960 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9961 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9963 tw32(GRC_MODE, grc_mode);
9966 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9967 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9968 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9969 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9972 /* This works around an issue with Athlon chipsets on
9973 * B3 tigon3 silicon. This bit has no effect on any
9974 * other revision. But do not set this on PCI Express
9975 * chips and don't even touch the clocks if the CPMU is present.
9977 if (!tg3_flag(tp, CPMU_PRESENT)) {
9978 if (!tg3_flag(tp, PCI_EXPRESS))
9979 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9980 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9983 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9984 tg3_flag(tp, PCIX_MODE)) {
9985 val = tr32(TG3PCI_PCISTATE);
9986 val |= PCISTATE_RETRY_SAME_DMA;
9987 tw32(TG3PCI_PCISTATE, val);
9990 if (tg3_flag(tp, ENABLE_APE)) {
9991 /* Allow reads and writes to the
9992 * APE register and memory space.
9994 val = tr32(TG3PCI_PCISTATE);
9995 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9996 PCISTATE_ALLOW_APE_SHMEM_WR |
9997 PCISTATE_ALLOW_APE_PSPACE_WR;
9998 tw32(TG3PCI_PCISTATE, val);
10001 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10002 /* Enable some hw fixes. */
10003 val = tr32(TG3PCI_MSI_DATA);
10004 val |= (1 << 26) | (1 << 28) | (1 << 29);
10005 tw32(TG3PCI_MSI_DATA, val);
10008 /* Descriptor ring init may make accesses to the
10009 * NIC SRAM area to setup the TX descriptors, so we
10010 * can only do this after the hardware has been
10011 * successfully reset.
10013 err = tg3_init_rings(tp);
10017 if (tg3_flag(tp, 57765_PLUS)) {
10018 val = tr32(TG3PCI_DMA_RW_CTRL) &
10019 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10020 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10021 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10022 if (!tg3_flag(tp, 57765_CLASS) &&
10023 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10024 tg3_asic_rev(tp) != ASIC_REV_5762)
10025 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10026 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10027 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10028 tg3_asic_rev(tp) != ASIC_REV_5761) {
10029 /* This value is determined during the probe time DMA
10030 * engine test, tg3_test_dma.
10032 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10035 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10036 GRC_MODE_4X_NIC_SEND_RINGS |
10037 GRC_MODE_NO_TX_PHDR_CSUM |
10038 GRC_MODE_NO_RX_PHDR_CSUM);
10039 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10041 /* Pseudo-header checksum is done by hardware logic and not
10042 * the offload processers, so make the chip do the pseudo-
10043 * header checksums on receive. For transmit it is more
10044 * convenient to do the pseudo-header checksum in software
10045 * as Linux does that on transmit for us in all cases.
10047 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10049 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10051 tw32(TG3_RX_PTP_CTL,
10052 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10054 if (tg3_flag(tp, PTP_CAPABLE))
10055 val |= GRC_MODE_TIME_SYNC_ENABLE;
10057 tw32(GRC_MODE, tp->grc_mode | val);
10059 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10060 * south bridge limitation. As a workaround, Driver is setting MRRS
10061 * to 2048 instead of default 4096.
10063 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10064 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10065 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10066 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10069 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10070 val = tr32(GRC_MISC_CFG);
10072 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10073 tw32(GRC_MISC_CFG, val);
10075 /* Initialize MBUF/DESC pool. */
10076 if (tg3_flag(tp, 5750_PLUS)) {
10078 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10079 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10080 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10081 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10083 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10084 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10085 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10086 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10089 fw_len = tp->fw_len;
10090 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10091 tw32(BUFMGR_MB_POOL_ADDR,
10092 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10093 tw32(BUFMGR_MB_POOL_SIZE,
10094 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10097 if (tp->dev->mtu <= ETH_DATA_LEN) {
10098 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10099 tp->bufmgr_config.mbuf_read_dma_low_water);
10100 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10101 tp->bufmgr_config.mbuf_mac_rx_low_water);
10102 tw32(BUFMGR_MB_HIGH_WATER,
10103 tp->bufmgr_config.mbuf_high_water);
10105 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10106 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10107 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10108 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10109 tw32(BUFMGR_MB_HIGH_WATER,
10110 tp->bufmgr_config.mbuf_high_water_jumbo);
10112 tw32(BUFMGR_DMA_LOW_WATER,
10113 tp->bufmgr_config.dma_low_water);
10114 tw32(BUFMGR_DMA_HIGH_WATER,
10115 tp->bufmgr_config.dma_high_water);
10117 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10118 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10119 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10120 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10121 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10122 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10123 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10124 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10125 tw32(BUFMGR_MODE, val);
10126 for (i = 0; i < 2000; i++) {
10127 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10132 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10136 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10137 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10139 tg3_setup_rxbd_thresholds(tp);
10141 /* Initialize TG3_BDINFO's at:
10142 * RCVDBDI_STD_BD: standard eth size rx ring
10143 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10144 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10147 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10148 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10149 * ring attribute flags
10150 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10152 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10153 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10155 * The size of each ring is fixed in the firmware, but the location is
10158 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10159 ((u64) tpr->rx_std_mapping >> 32));
10160 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10161 ((u64) tpr->rx_std_mapping & 0xffffffff));
10162 if (!tg3_flag(tp, 5717_PLUS))
10163 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10164 NIC_SRAM_RX_BUFFER_DESC);
10166 /* Disable the mini ring */
10167 if (!tg3_flag(tp, 5705_PLUS))
10168 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10169 BDINFO_FLAGS_DISABLED);
10171 /* Program the jumbo buffer descriptor ring control
10172 * blocks on those devices that have them.
10174 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10175 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10177 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10178 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10179 ((u64) tpr->rx_jmb_mapping >> 32));
10180 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10181 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10182 val = TG3_RX_JMB_RING_SIZE(tp) <<
10183 BDINFO_FLAGS_MAXLEN_SHIFT;
10184 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10185 val | BDINFO_FLAGS_USE_EXT_RECV);
10186 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10187 tg3_flag(tp, 57765_CLASS) ||
10188 tg3_asic_rev(tp) == ASIC_REV_5762)
10189 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10190 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10192 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10193 BDINFO_FLAGS_DISABLED);
10196 if (tg3_flag(tp, 57765_PLUS)) {
10197 val = TG3_RX_STD_RING_SIZE(tp);
10198 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10199 val |= (TG3_RX_STD_DMA_SZ << 2);
10201 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10203 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10205 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10207 tpr->rx_std_prod_idx = tp->rx_pending;
10208 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10210 tpr->rx_jmb_prod_idx =
10211 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10212 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10214 tg3_rings_reset(tp);
10216 /* Initialize MAC address and backoff seed. */
10217 __tg3_set_mac_addr(tp, false);
10219 /* MTU + ethernet header + FCS + optional VLAN tag */
10220 tw32(MAC_RX_MTU_SIZE,
10221 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10223 /* The slot time is changed by tg3_setup_phy if we
10224 * run at gigabit with half duplex.
10226 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10227 (6 << TX_LENGTHS_IPG_SHIFT) |
10228 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10230 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10231 tg3_asic_rev(tp) == ASIC_REV_5762)
10232 val |= tr32(MAC_TX_LENGTHS) &
10233 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10234 TX_LENGTHS_CNT_DWN_VAL_MSK);
10236 tw32(MAC_TX_LENGTHS, val);
10238 /* Receive rules. */
10239 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10240 tw32(RCVLPC_CONFIG, 0x0181);
10242 /* Calculate RDMAC_MODE setting early, we need it to determine
10243 * the RCVLPC_STATE_ENABLE mask.
10245 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10246 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10247 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10248 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10249 RDMAC_MODE_LNGREAD_ENAB);
10251 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10252 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10254 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10255 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10256 tg3_asic_rev(tp) == ASIC_REV_57780)
10257 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10258 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10259 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10261 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10262 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10263 if (tg3_flag(tp, TSO_CAPABLE)) {
10264 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10265 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10266 !tg3_flag(tp, IS_5788)) {
10267 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10271 if (tg3_flag(tp, PCI_EXPRESS))
10272 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10274 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10276 if (tp->dev->mtu <= ETH_DATA_LEN) {
10277 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10278 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10282 if (tg3_flag(tp, HW_TSO_1) ||
10283 tg3_flag(tp, HW_TSO_2) ||
10284 tg3_flag(tp, HW_TSO_3))
10285 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10287 if (tg3_flag(tp, 57765_PLUS) ||
10288 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10289 tg3_asic_rev(tp) == ASIC_REV_57780)
10290 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10292 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10293 tg3_asic_rev(tp) == ASIC_REV_5762)
10294 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10296 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10297 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10298 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10299 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10300 tg3_flag(tp, 57765_PLUS)) {
10303 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10304 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10306 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10308 val = tr32(tgtreg);
10309 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10310 tg3_asic_rev(tp) == ASIC_REV_5762) {
10311 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10312 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10313 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10314 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10315 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10316 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10318 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10321 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10323 tg3_asic_rev(tp) == ASIC_REV_5762) {
10326 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10327 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10329 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10331 val = tr32(tgtreg);
10333 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10334 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10337 /* Receive/send statistics. */
10338 if (tg3_flag(tp, 5750_PLUS)) {
10339 val = tr32(RCVLPC_STATS_ENABLE);
10340 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10341 tw32(RCVLPC_STATS_ENABLE, val);
10342 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10343 tg3_flag(tp, TSO_CAPABLE)) {
10344 val = tr32(RCVLPC_STATS_ENABLE);
10345 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10346 tw32(RCVLPC_STATS_ENABLE, val);
10348 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10350 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10351 tw32(SNDDATAI_STATSENAB, 0xffffff);
10352 tw32(SNDDATAI_STATSCTRL,
10353 (SNDDATAI_SCTRL_ENABLE |
10354 SNDDATAI_SCTRL_FASTUPD));
10356 /* Setup host coalescing engine. */
10357 tw32(HOSTCC_MODE, 0);
10358 for (i = 0; i < 2000; i++) {
10359 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10364 __tg3_set_coalesce(tp, &tp->coal);
10366 if (!tg3_flag(tp, 5705_PLUS)) {
10367 /* Status/statistics block address. See tg3_timer,
10368 * the tg3_periodic_fetch_stats call there, and
10369 * tg3_get_stats to see how this works for 5705/5750 chips.
10371 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10372 ((u64) tp->stats_mapping >> 32));
10373 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10374 ((u64) tp->stats_mapping & 0xffffffff));
10375 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10377 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10379 /* Clear statistics and status block memory areas */
10380 for (i = NIC_SRAM_STATS_BLK;
10381 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10382 i += sizeof(u32)) {
10383 tg3_write_mem(tp, i, 0);
10388 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10390 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10391 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10392 if (!tg3_flag(tp, 5705_PLUS))
10393 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10395 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10396 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10397 /* reset to prevent losing 1st rx packet intermittently */
10398 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10402 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10403 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10404 MAC_MODE_FHDE_ENABLE;
10405 if (tg3_flag(tp, ENABLE_APE))
10406 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10407 if (!tg3_flag(tp, 5705_PLUS) &&
10408 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10409 tg3_asic_rev(tp) != ASIC_REV_5700)
10410 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10411 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10414 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10415 * If TG3_FLAG_IS_NIC is zero, we should read the
10416 * register to preserve the GPIO settings for LOMs. The GPIOs,
10417 * whether used as inputs or outputs, are set by boot code after
10420 if (!tg3_flag(tp, IS_NIC)) {
10423 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10424 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10425 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10427 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10428 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10429 GRC_LCLCTRL_GPIO_OUTPUT3;
10431 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10432 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10434 tp->grc_local_ctrl &= ~gpio_mask;
10435 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10437 /* GPIO1 must be driven high for eeprom write protect */
10438 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10439 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10440 GRC_LCLCTRL_GPIO_OUTPUT1);
10442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10445 if (tg3_flag(tp, USING_MSIX)) {
10446 val = tr32(MSGINT_MODE);
10447 val |= MSGINT_MODE_ENABLE;
10448 if (tp->irq_cnt > 1)
10449 val |= MSGINT_MODE_MULTIVEC_EN;
10450 if (!tg3_flag(tp, 1SHOT_MSI))
10451 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10452 tw32(MSGINT_MODE, val);
10455 if (!tg3_flag(tp, 5705_PLUS)) {
10456 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10460 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10461 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10462 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10463 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10464 WDMAC_MODE_LNGREAD_ENAB);
10466 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10467 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10468 if (tg3_flag(tp, TSO_CAPABLE) &&
10469 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10470 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10472 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10473 !tg3_flag(tp, IS_5788)) {
10474 val |= WDMAC_MODE_RX_ACCEL;
10478 /* Enable host coalescing bug fix */
10479 if (tg3_flag(tp, 5755_PLUS))
10480 val |= WDMAC_MODE_STATUS_TAG_FIX;
10482 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10483 val |= WDMAC_MODE_BURST_ALL_DATA;
10485 tw32_f(WDMAC_MODE, val);
10488 if (tg3_flag(tp, PCIX_MODE)) {
10491 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10493 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10494 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10495 pcix_cmd |= PCI_X_CMD_READ_2K;
10496 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10497 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10498 pcix_cmd |= PCI_X_CMD_READ_2K;
10500 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10504 tw32_f(RDMAC_MODE, rdmac_mode);
10507 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10508 tg3_asic_rev(tp) == ASIC_REV_5720) {
10509 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10510 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10513 if (i < TG3_NUM_RDMA_CHANNELS) {
10514 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10515 val |= tg3_lso_rd_dma_workaround_bit(tp);
10516 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10517 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10521 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10522 if (!tg3_flag(tp, 5705_PLUS))
10523 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10525 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10526 tw32(SNDDATAC_MODE,
10527 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10529 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10531 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10532 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10533 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10534 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10535 val |= RCVDBDI_MODE_LRG_RING_SZ;
10536 tw32(RCVDBDI_MODE, val);
10537 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10538 if (tg3_flag(tp, HW_TSO_1) ||
10539 tg3_flag(tp, HW_TSO_2) ||
10540 tg3_flag(tp, HW_TSO_3))
10541 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10542 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10543 if (tg3_flag(tp, ENABLE_TSS))
10544 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10545 tw32(SNDBDI_MODE, val);
10546 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10548 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10549 err = tg3_load_5701_a0_firmware_fix(tp);
10554 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10555 /* Ignore any errors for the firmware download. If download
10556 * fails, the device will operate with EEE disabled
10558 tg3_load_57766_firmware(tp);
10561 if (tg3_flag(tp, TSO_CAPABLE)) {
10562 err = tg3_load_tso_firmware(tp);
10567 tp->tx_mode = TX_MODE_ENABLE;
10569 if (tg3_flag(tp, 5755_PLUS) ||
10570 tg3_asic_rev(tp) == ASIC_REV_5906)
10571 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10573 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10574 tg3_asic_rev(tp) == ASIC_REV_5762) {
10575 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10576 tp->tx_mode &= ~val;
10577 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10580 tw32_f(MAC_TX_MODE, tp->tx_mode);
10583 if (tg3_flag(tp, ENABLE_RSS)) {
10586 tg3_rss_write_indir_tbl(tp);
10588 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10590 for (i = 0; i < 10 ; i++)
10591 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10594 tp->rx_mode = RX_MODE_ENABLE;
10595 if (tg3_flag(tp, 5755_PLUS))
10596 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10598 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10599 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10601 if (tg3_flag(tp, ENABLE_RSS))
10602 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10603 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10604 RX_MODE_RSS_IPV6_HASH_EN |
10605 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10606 RX_MODE_RSS_IPV4_HASH_EN |
10607 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10609 tw32_f(MAC_RX_MODE, tp->rx_mode);
10612 tw32(MAC_LED_CTRL, tp->led_ctrl);
10614 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10615 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10616 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10619 tw32_f(MAC_RX_MODE, tp->rx_mode);
10622 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10623 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10624 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10625 /* Set drive transmission level to 1.2V */
10626 /* only if the signal pre-emphasis bit is not set */
10627 val = tr32(MAC_SERDES_CFG);
10630 tw32(MAC_SERDES_CFG, val);
10632 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10633 tw32(MAC_SERDES_CFG, 0x616000);
10636 /* Prevent chip from dropping frames when flow control
10639 if (tg3_flag(tp, 57765_CLASS))
10643 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10645 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10646 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10647 /* Use hardware link auto-negotiation */
10648 tg3_flag_set(tp, HW_AUTONEG);
10651 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10652 tg3_asic_rev(tp) == ASIC_REV_5714) {
10655 tmp = tr32(SERDES_RX_CTRL);
10656 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10657 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10658 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10659 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10662 if (!tg3_flag(tp, USE_PHYLIB)) {
10663 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10664 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10666 err = tg3_setup_phy(tp, false);
10670 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10671 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10674 /* Clear CRC stats. */
10675 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10676 tg3_writephy(tp, MII_TG3_TEST1,
10677 tmp | MII_TG3_TEST1_CRC_EN);
10678 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10683 __tg3_set_rx_mode(tp->dev);
10685 /* Initialize receive rules. */
10686 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10687 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10688 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10689 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10691 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10695 if (tg3_flag(tp, ENABLE_ASF))
10699 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10702 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10705 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10708 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10711 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10714 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10717 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10720 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10723 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10726 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10729 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10732 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10735 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10737 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10745 if (tg3_flag(tp, ENABLE_APE))
10746 /* Write our heartbeat update interval to APE. */
10747 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10748 APE_HOST_HEARTBEAT_INT_5SEC);
10750 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10755 /* Called at device open time to get the chip ready for
10756 * packet processing. Invoked with tp->lock held.
/* Returns 0 on success or a negative errno propagated from tg3_reset_hw(). */
10758 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10760 /* Chip may have been just powered on. If so, the boot code may still
10761 * be running initialization. Wait for it to finish to avoid races in
10762 * accessing the hardware.
/* Gate register access first; the wait-for-bootcode step sits on lines
 * not visible in this extract. */
10764 tg3_enable_register_access(tp);
10767 tg3_switch_clocks(tp);
/* Reset the PCI memory window base before the full hardware reset. */
10769 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10771 return tg3_reset_hw(tp, reset_phy);
10774 #ifdef CONFIG_TIGON3_HWMON
/* Read all TG3_SD_NUM_RECS sensor-data records from the APE scratchpad
 * into the caller-supplied ocir array. Records with a bad signature or
 * without the ACTIVE flag are zeroed so callers can treat them as empty. */
10775 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10777 u32 off, len = TG3_OCIR_LEN;
10780 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10781 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10783 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10784 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10785 memset(ocir, 0, len);
10789 /* sysfs attributes for hwmon */
/* hwmon show() callback: read one temperature word from the APE
 * scratchpad (offset selected by the sensor attribute's index) under
 * tp->lock and report it scaled by 1000 — hwmon sysfs convention is
 * millidegrees Celsius. The 'temperature' local is declared on a line
 * missing from this extract. */
10790 static ssize_t tg3_show_temp(struct device *dev,
10791 struct device_attribute *devattr, char *buf)
10793 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10794 struct tg3 *tp = dev_get_drvdata(dev);
10797 spin_lock_bh(&tp->lock);
10798 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10799 sizeof(temperature));
10800 spin_unlock_bh(&tp->lock);
10801 return sprintf(buf, "%u\n", temperature * 1000);
/* hwmon sensor attributes: current temperature plus the caution and
 * max thresholds, all backed by tg3_show_temp() with the APE scratchpad
 * offset as the per-attribute index. All are read-only (0444). */
10805 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10806 TG3_TEMP_SENSOR_OFFSET);
10807 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10808 TG3_TEMP_CAUTION_OFFSET);
10809 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10810 TG3_TEMP_MAX_OFFSET);
/* Attribute array consumed by hwmon_device_register_with_groups();
 * ATTRIBUTE_GROUPS(tg3) generates the tg3_groups table from it. */
10812 static struct attribute *tg3_attrs[] = {
10813 &sensor_dev_attr_temp1_input.dev_attr.attr,
10814 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10815 &sensor_dev_attr_temp1_max.dev_attr.attr,
10818 ATTRIBUTE_GROUPS(tg3);
/* Unregister the hwmon device if one was registered; idempotent because
 * hwmon_dev is NULLed after unregistering. */
10820 static void tg3_hwmon_close(struct tg3 *tp)
10822 if (tp->hwmon_dev) {
10823 hwmon_device_unregister(tp->hwmon_dev);
10824 tp->hwmon_dev = NULL;
/* Register a "tg3" hwmon device if the APE scratchpad exposes any
 * sensor-data records with non-zero payload. Registration failure is
 * logged but non-fatal (hwmon_dev is reset to NULL). The 'size'
 * accumulator and the early-return when it stays zero are on lines
 * missing from this extract. */
10828 static void tg3_hwmon_open(struct tg3 *tp)
10832 struct pci_dev *pdev = tp->pdev;
10833 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10835 tg3_sd_scan_scratchpad(tp, ocirs);
10837 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10838 if (!ocirs[i].src_data_length)
10841 size += ocirs[i].src_hdr_length;
10842 size += ocirs[i].src_data_length;
10848 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10850 if (IS_ERR(tp->hwmon_dev)) {
10851 tp->hwmon_dev = NULL;
10852 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
/* No-op stubs used when CONFIG_TIGON3_HWMON is disabled (the matching
 * #else is on a line missing from this extract). */
10856 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10857 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10858 #endif /* CONFIG_TIGON3_HWMON */
/* Accumulate a 32-bit hardware statistics register REG into the 64-bit
 * (high/low pair) counter PSTAT. Carry into .high is detected by the
 * unsigned wraparound test on .low. The closing "} while (0)" is on a
 * line missing from this extract. */
10861 #define TG3_STAT_ADD32(PSTAT, REG) \
10862 do { u32 __val = tr32(REG); \
10863 (PSTAT)->low += __val; \
10864 if ((PSTAT)->low < __val) \
10865 (PSTAT)->high += 1; \
/* Fold the chip's 32-bit MAC TX/RX statistics registers into the 64-bit
 * counters in tp->hw_stats. Called periodically from tg3_timer() with
 * tp->lock held (the early return when hw_stats is absent sits on lines
 * missing from this extract). */
10868 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10870 struct tg3_hw_stats *sp = tp->hw_stats;
10875 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10876 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10877 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10878 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10879 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10880 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10881 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10882 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10883 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10884 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10885 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10886 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10887 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* 5719/5720 RDMA workaround: once enough packets have been sent, undo
 * the LSO read-DMA workaround bit that tg3_reset_hw set and clear the
 * flag so this runs only once per reset. */
10888 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10889 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10890 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10893 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10894 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10895 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10896 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10899 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10900 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10901 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10902 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10903 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10904 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10905 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10906 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10907 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10908 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10909 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10910 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10911 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10912 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10914 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* On most chips rx_discards comes straight from the discard counter;
 * the listed 5717/5762/5719-A0/5720-A0 parts instead derive it from the
 * mbuf low-watermark attention bit (write-one-to-clear below). */
10915 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10916 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10917 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10918 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10919 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10921 u32 val = tr32(HOSTCC_FLOW_ATTN);
10922 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10924 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10925 sp->rx_discards.low += val;
10926 if (sp->rx_discards.low < val)
10927 sp->rx_discards.high += 1;
10929 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10931 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Detect a missed MSI: if a NAPI context has pending work but its rx/tx
 * consumer indices have not moved since the last tick, bump a per-vector
 * counter (the actual interrupt re-kick on the second miss is on lines
 * missing from this extract). Otherwise record the current indices. */
10934 static void tg3_chk_missed_msi(struct tg3 *tp)
10938 for (i = 0; i < tp->irq_cnt; i++) {
10939 struct tg3_napi *tnapi = &tp->napi[i];
10941 if (tg3_has_work(tnapi)) {
10942 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10943 tnapi->last_tx_cons == tnapi->tx_cons) {
10944 if (tnapi->chk_msi_cnt < 1) {
10945 tnapi->chk_msi_cnt++;
10951 tnapi->chk_msi_cnt = 0;
10952 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10953 tnapi->last_tx_cons = tnapi->tx_cons;
/* Driver maintenance timer. Under tp->lock it: checks for missed MSIs,
 * services the non-tagged-status IRQ race, fetches statistics and polls
 * link state once per second (timer_counter), and sends the ASF/APE
 * heartbeat (asf_counter). Re-arms itself at the end. */
10957 static void tg3_timer(struct timer_list *t)
10959 struct tg3 *tp = from_timer(tp, t, timer)
10961 spin_lock(&tp->lock);
/* Skip all work while an IRQ sync or reset task is in flight, but keep
 * the timer running. */
10963 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10964 spin_unlock(&tp->lock);
10965 goto restart_timer;
10968 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10969 tg3_flag(tp, 57765_CLASS))
10970 tg3_chk_missed_msi(tp);
10972 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10973 /* BCM4785: Flush posted writes from GbE to host memory. */
10977 if (!tg3_flag(tp, TAGGED_STATUS)) {
10978 /* All of this garbage is because when using non-tagged
10979 * IRQ status the mailbox/status_block protocol the chip
10980 * uses with the cpu is race prone.
/* Force an interrupt if the status block was updated but not serviced;
 * otherwise nudge the coalescing engine to update it now. */
10982 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10983 tw32(GRC_LOCAL_CTRL,
10984 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10986 tw32(HOSTCC_MODE, tp->coalesce_mode |
10987 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write DMA engine dead — schedule a full reset from process context. */
10990 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10991 spin_unlock(&tp->lock);
10992 tg3_reset_task_schedule(tp);
10993 goto restart_timer;
10997 /* This part only runs once per second. */
10998 if (!--tp->timer_counter) {
10999 if (tg3_flag(tp, 5705_PLUS))
11000 tg3_periodic_fetch_stats(tp);
11002 if (tp->setlpicnt && !--tp->setlpicnt)
11003 tg3_phy_eee_enable(tp);
/* Link polling: one of four mechanisms depending on chip/PHY flags
 * (link-change register, serdes polling, parallel detect, CPMU). */
11005 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11009 mac_stat = tr32(MAC_STATUS);
11012 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11013 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11015 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11019 tg3_setup_phy(tp, false);
11020 } else if (tg3_flag(tp, POLL_SERDES)) {
11021 u32 mac_stat = tr32(MAC_STATUS);
11022 int need_setup = 0;
11025 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11028 if (!tp->link_up &&
11029 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11030 MAC_STATUS_SIGNAL_DET))) {
11034 if (!tp->serdes_counter) {
11037 ~MAC_MODE_PORT_MODE_MASK));
11039 tw32_f(MAC_MODE, tp->mac_mode);
11042 tg3_setup_phy(tp, false);
11044 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11045 tg3_flag(tp, 5780_CLASS)) {
11046 tg3_serdes_parallel_detect(tp);
11047 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11048 u32 cpmu = tr32(TG3_CPMU_STATUS);
11049 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11050 TG3_CPMU_STATUS_LINK_MASK);
11052 if (link_up != tp->link_up)
11053 tg3_setup_phy(tp, false);
11056 tp->timer_counter = tp->timer_multiplier;
11059 /* Heartbeat is only sent once every 2 seconds.
11061 * The heartbeat is to tell the ASF firmware that the host
11062 * driver is still alive. In the event that the OS crashes,
11063 * ASF needs to reset the hardware to free up the FIFO space
11064 * that may be filled with rx packets destined for the host.
11065 * If the FIFO is full, ASF will no longer function properly.
11067 * Unintended resets have been reported on real time kernels
11068 * where the timer doesn't run on time. Netpoll will also have
11071 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11072 * to check the ring condition when the heartbeat is expiring
11073 * before doing the reset. This will prevent most unintended
11076 if (!--tp->asf_counter) {
11077 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11078 tg3_wait_for_event_ack(tp);
11080 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11081 FWCMD_NICDRV_ALIVE3);
11082 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11083 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11084 TG3_FW_UPDATE_TIMEOUT_SEC);
11086 tg3_generate_fw_event(tp);
11088 tp->asf_counter = tp->asf_multiplier;
11091 /* Update the APE heartbeat every 5 seconds.*/
11092 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11094 spin_unlock(&tp->lock);
/* Re-arm; the restart_timer label is on a line missing from this
 * extract. */
11097 tp->timer.expires = jiffies + tp->timer_offset;
11098 add_timer(&tp->timer);
/* Choose the timer period (1s with tagged status on chips that don't
 * need the missed-MSI check, else 100ms), derive the once-per-second and
 * ASF heartbeat multipliers from it, and initialize the timer. */
11101 static void tg3_timer_init(struct tg3 *tp)
11103 if (tg3_flag(tp, TAGGED_STATUS) &&
11104 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11105 !tg3_flag(tp, 57765_CLASS))
11106 tp->timer_offset = HZ;
11108 tp->timer_offset = HZ / 10;
11110 BUG_ON(tp->timer_offset > HZ);
11112 tp->timer_multiplier = (HZ / tp->timer_offset);
11113 tp->asf_multiplier = (HZ / tp->timer_offset) *
11114 TG3_FW_UPDATE_FREQ_SEC;
11116 timer_setup(&tp->timer, tg3_timer, 0);
/* Reload both countdowns and arm the maintenance timer. */
11119 static void tg3_timer_start(struct tg3 *tp)
11121 tp->asf_counter = tp->asf_multiplier;
11122 tp->timer_counter = tp->timer_multiplier;
11124 tp->timer.expires = jiffies + tp->timer_offset;
11125 add_timer(&tp->timer);
/* Stop the maintenance timer, waiting for a concurrent handler to finish. */
11128 static void tg3_timer_stop(struct tg3 *tp)
11130 del_timer_sync(&tp->timer);
11133 /* Restart hardware after configuration changes, self-test, etc.
11134 * Invoked with tp->lock held.
/* On init failure: halt the chip and close the device. dev_close()
 * must run unlocked, hence the __releases/__acquires annotations and
 * the drop/retake of the full lock around it. */
11136 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11137 __releases(tp->lock)
11138 __acquires(tp->lock)
11142 err = tg3_init_hw(tp, reset_phy);
11144 netdev_err(tp->dev,
11145 "Failed to re-initialize device, aborting\n");
11146 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11147 tg3_full_unlock(tp);
11148 tg3_timer_stop(tp);
11150 tg3_napi_enable(tp);
11151 dev_close(tp->dev);
11152 tg3_full_lock(tp, 0);
/* Deferred reset worker (scheduled by tg3_reset_task_schedule). Stops
 * traffic, halts and re-initializes the chip, then restarts the netif
 * path; closes the device if re-init fails. Clears RESET_TASK_PENDING
 * on every exit path so tg3_reset_task_cancel() never blocks forever. */
11159 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11163 tg3_full_lock(tp, 0);
11165 if (!netif_running(tp->dev)) {
11166 tg3_flag_clear(tp, RESET_TASK_PENDING);
11167 tg3_full_unlock(tp);
11172 tg3_full_unlock(tp);
11176 tg3_netif_stop(tp);
11178 tg3_full_lock(tp, 1);
/* A TX timeout may have unmasked a write-reordering problem; fall back
 * to flushed mailbox writes before resetting. */
11180 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11181 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11182 tp->write32_rx_mbox = tg3_write_flush_reg32;
11183 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11184 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11187 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11188 err = tg3_init_hw(tp, true);
11190 tg3_full_unlock(tp);
11192 tg3_napi_enable(tp);
11193 /* Clear this flag so that tg3_reset_task_cancel() will not
11194 * call cancel_work_sync() and wait forever.
11196 tg3_flag_clear(tp, RESET_TASK_PENDING);
11197 dev_close(tp->dev);
11201 tg3_netif_start(tp);
11202 tg3_full_unlock(tp);
11204 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* Request the IRQ for NAPI context irq_num. With a single vector the
 * device name is used directly; with multiple vectors a per-vector
 * "-txrx-/-tx-/-rx-" suffixed name is built in tnapi->irq_lbl. The
 * handler is the 1-shot/plain MSI handler for MSI(-X), else the shared
 * INTx handler (tagged variant when TAGGED_STATUS). */
11209 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11212 unsigned long flags;
11214 struct tg3_napi *tnapi = &tp->napi[irq_num];
11216 if (tp->irq_cnt == 1)
11217 name = tp->dev->name;
11219 name = &tnapi->irq_lbl[0];
11220 if (tnapi->tx_buffers && tnapi->rx_rcb)
11221 snprintf(name, IFNAMSIZ,
11222 "%s-txrx-%d", tp->dev->name, irq_num);
11223 else if (tnapi->tx_buffers)
11224 snprintf(name, IFNAMSIZ,
11225 "%s-tx-%d", tp->dev->name, irq_num);
11226 else if (tnapi->rx_rcb)
11227 snprintf(name, IFNAMSIZ,
11228 "%s-rx-%d", tp->dev->name, irq_num);
11230 snprintf(name, IFNAMSIZ,
11231 "%s-%d", tp->dev->name, irq_num);
11232 name[IFNAMSIZ-1] = 0;
11235 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11237 if (tg3_flag(tp, 1SHOT_MSI))
11238 fn = tg3_msi_1shot;
11241 fn = tg3_interrupt;
11242 if (tg3_flag(tp, TAGGED_STATUS))
11243 fn = tg3_interrupt_tagged;
11244 flags = IRQF_SHARED;
11247 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* Verify that the device can actually deliver an interrupt: swap in a
 * test ISR, force a coalescing-engine interrupt, and poll up to 5 times
 * for either a mailbox update or a masked PCI INT. Restores the normal
 * handler (and MSI one-shot mode) before returning; the return-value
 * lines are missing from this extract. */
11250 static int tg3_test_interrupt(struct tg3 *tp)
11252 struct tg3_napi *tnapi = &tp->napi[0];
11253 struct net_device *dev = tp->dev;
11254 int err, i, intr_ok = 0;
11257 if (!netif_running(dev))
11260 tg3_disable_ints(tp);
11262 free_irq(tnapi->irq_vec, tnapi);
11265 * Turn off MSI one shot mode. Otherwise this test has no
11266 * observable way to know whether the interrupt was delivered.
11268 if (tg3_flag(tp, 57765_PLUS)) {
11269 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11270 tw32(MSGINT_MODE, val);
11273 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11274 IRQF_SHARED, dev->name, tnapi);
11278 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11279 tg3_enable_ints(tp);
11281 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11284 for (i = 0; i < 5; i++) {
11285 u32 int_mbox, misc_host_ctrl;
11287 int_mbox = tr32_mailbox(tnapi->int_mbox);
11288 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11290 if ((int_mbox != 0) ||
11291 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
/* Tagged-status chips need the last tag acked or the next interrupt
 * never fires. */
11296 if (tg3_flag(tp, 57765_PLUS) &&
11297 tnapi->hw_status->status_tag != tnapi->last_tag)
11298 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11303 tg3_disable_ints(tp);
11305 free_irq(tnapi->irq_vec, tnapi);
11307 err = tg3_request_irq(tp, 0);
11313 /* Reenable MSI one shot mode. */
11314 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11315 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11316 tw32(MSGINT_MODE, val);
11324 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11325 * successfully restored
11327 static int tg3_test_msi(struct tg3 *tp)
11332 if (!tg3_flag(tp, USING_MSI))
11335 /* Turn off SERR reporting in case MSI terminates with Master
/* ... Abort (comment continues on a line missing from this extract). */
11338 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11339 pci_write_config_word(tp->pdev, PCI_COMMAND,
11340 pci_cmd & ~PCI_COMMAND_SERR);
11342 err = tg3_test_interrupt(tp);
11344 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11349 /* other failures */
11353 /* MSI test failed, go back to INTx mode */
11354 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11355 "to INTx mode. Please report this failure to the PCI "
11356 "maintainer and include system chipset information\n");
11358 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11360 pci_disable_msi(tp->pdev);
11362 tg3_flag_clear(tp, USING_MSI);
11363 tp->napi[0].irq_vec = tp->pdev->irq;
11365 err = tg3_request_irq(tp, 0);
11369 /* Need to reset the chip because the MSI cycle may have terminated
11370 * with Master Abort.
11372 tg3_full_lock(tp, 1);
11374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11375 err = tg3_init_hw(tp, true);
11377 tg3_full_unlock(tp);
/* On re-init failure release the freshly requested INTx IRQ. */
11380 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* Load the firmware blob named in tp->fw_needed into tp->fw, sanity-
 * check the advertised length from the header against the blob size,
 * and clear fw_needed on success. (This tree's reject_firmware() is a
 * request_firmware() stand-in — TODO confirm against the build.) */
11385 static int tg3_request_firmware(struct tg3 *tp)
11387 const struct tg3_firmware_hdr *fw_hdr;
11389 if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11390 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11395 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11397 /* Firmware blob starts with version numbers, followed by
11398 * start address and _full_ length including BSS sections
11399 * (which must be longer than the actual data, of course
11402 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11403 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11404 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11405 tp->fw_len, tp->fw_needed);
11406 release_firmware(tp->fw);
11411 /* We no longer need firmware; we have it. */
11412 tp->fw_needed = NULL;
/* Number of interrupt vectors to request: max of rx/tx queue counts,
 * plus one extra for the link/misc vector in multiqueue MSI-X mode,
 * capped at tp->irq_max. */
11416 static u32 tg3_irq_count(struct tg3 *tp)
11418 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11421 /* We want as many rx rings enabled as there are cpus.
11422 * In multiqueue MSI-X mode, the first MSI-X vector
11423 * only deals with link interrupts, etc, so we add
11424 * one to the number of vectors we are requesting.
11426 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
/* Try to switch the device to MSI-X. Sizes the rx queue count from the
 * default RSS queue heuristic (unless overridden by rxq_req), requests
 * the vectors, scales queue counts down if fewer vectors are granted,
 * and sets ENABLE_RSS/ENABLE_TSS accordingly. Returns true on success
 * (the return statements are on lines missing from this extract). */
11432 static bool tg3_enable_msix(struct tg3 *tp)
11435 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11437 tp->txq_cnt = tp->txq_req;
11438 tp->rxq_cnt = tp->rxq_req;
11440 tp->rxq_cnt = netif_get_num_default_rss_queues();
11441 if (tp->rxq_cnt > tp->rxq_max)
11442 tp->rxq_cnt = tp->rxq_max;
11444 /* Disable multiple TX rings by default. Simple round-robin hardware
11445 * scheduling of the TX rings can cause starvation of rings with
11446 * small packets when other rings have TSO or jumbo packets.
11451 tp->irq_cnt = tg3_irq_count(tp);
11453 for (i = 0; i < tp->irq_max; i++) {
11454 msix_ent[i].entry = i;
11455 msix_ent[i].vector = 0;
11458 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
/* Granted fewer vectors than requested: shrink rx (minus the link
 * vector) and tx queue counts to match. */
11461 } else if (rc < tp->irq_cnt) {
11462 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11465 tp->rxq_cnt = max(rc - 1, 1);
11467 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11470 for (i = 0; i < tp->irq_max; i++)
11471 tp->napi[i].irq_vec = msix_ent[i].vector;
11473 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11474 pci_disable_msix(tp->pdev);
11478 if (tp->irq_cnt == 1)
11481 tg3_flag_set(tp, ENABLE_RSS);
11483 if (tp->txq_cnt > 1)
11484 tg3_flag_set(tp, ENABLE_TSS);
11486 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
/* Select the interrupt mode for the device: MSI-X if supported and
 * enabled, else MSI, else legacy INTx; then program MSGINT_MODE and
 * fall back to the single INTx vector / single queue when needed. */
11491 static void tg3_ints_init(struct tg3 *tp)
11493 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11494 !tg3_flag(tp, TAGGED_STATUS)) {
11495 /* All MSI supporting chips should support tagged
11496 * status. Assert that this is the case.
11498 netdev_warn(tp->dev,
11499 "MSI without TAGGED_STATUS? Not using MSI\n");
11503 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11504 tg3_flag_set(tp, USING_MSIX);
11505 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11506 tg3_flag_set(tp, USING_MSI);
11508 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11509 u32 msi_mode = tr32(MSGINT_MODE);
11510 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11511 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11512 if (!tg3_flag(tp, 1SHOT_MSI))
11513 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11514 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11517 if (!tg3_flag(tp, USING_MSIX)) {
11519 tp->napi[0].irq_vec = tp->pdev->irq;
11522 if (tp->irq_cnt == 1) {
11525 netif_set_real_num_tx_queues(tp->dev, 1);
11526 netif_set_real_num_rx_queues(tp->dev, 1);
/* Tear down MSI/MSI-X and clear all interrupt-mode and RSS/TSS flags. */
11530 static void tg3_ints_fini(struct tg3 *tp)
11532 if (tg3_flag(tp, USING_MSIX))
11533 pci_disable_msix(tp->pdev);
11534 else if (tg3_flag(tp, USING_MSI))
11535 pci_disable_msi(tp->pdev);
11536 tg3_flag_clear(tp, USING_MSI);
11537 tg3_flag_clear(tp, USING_MSIX);
11538 tg3_flag_clear(tp, ENABLE_RSS);
11539 tg3_flag_clear(tp, ENABLE_TSS);
/* Full bring-up path shared by open/resume: set up interrupts, allocate
 * DMA-consistent resources, request per-vector IRQs, init the hardware,
 * optionally run the MSI self-test, then start the timer, hwmon, PTP,
 * and TX queues. Error paths unwind in reverse (IRQs -> NAPI -> memory). */
11542 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11545 struct net_device *dev = tp->dev;
11549 * Setup interrupts first so we know how
11550 * many NAPI resources to allocate
11554 tg3_rss_check_indir_tbl(tp);
11556 /* The placement of this call is tied
11557 * to the setup and use of Host TX descriptors.
11559 err = tg3_alloc_consistent(tp);
11561 goto out_ints_fini;
11565 tg3_napi_enable(tp);
11567 for (i = 0; i < tp->irq_cnt; i++) {
11568 err = tg3_request_irq(tp, i);
/* Unwind previously requested vectors on failure. */
11570 for (i--; i >= 0; i--) {
11571 struct tg3_napi *tnapi = &tp->napi[i];
11573 free_irq(tnapi->irq_vec, tnapi);
11575 goto out_napi_fini;
11579 tg3_full_lock(tp, 0);
11582 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11584 err = tg3_init_hw(tp, reset_phy);
11586 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587 tg3_free_rings(tp);
11590 tg3_full_unlock(tp);
11595 if (test_irq && tg3_flag(tp, USING_MSI)) {
11596 err = tg3_test_msi(tp);
11599 tg3_full_lock(tp, 0);
11600 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11601 tg3_free_rings(tp);
11602 tg3_full_unlock(tp);
11604 goto out_napi_fini;
/* Pre-57765 MSI chips need the 1-shot MSI bit set in PCIE config. */
11607 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11608 u32 val = tr32(PCIE_TRANSACTION_CFG);
11610 tw32(PCIE_TRANSACTION_CFG,
11611 val | PCIE_TRANS_CFG_1SHOT_MSI);
11617 tg3_hwmon_open(tp);
11619 tg3_full_lock(tp, 0);
11621 tg3_timer_start(tp);
11622 tg3_flag_set(tp, INIT_COMPLETE);
11623 tg3_enable_ints(tp);
11625 tg3_ptp_resume(tp);
11627 tg3_full_unlock(tp);
11629 netif_tx_start_all_queues(dev);
11632 * Reset loopback feature if it was turned on while the device was down
11633 * make sure that it's installed properly now.
11635 if (dev->features & NETIF_F_LOOPBACK)
11636 tg3_set_loopback(dev, dev->features);
/* Error unwind labels (out_free_irq / out_napi_fini / out_ints_fini);
 * some label lines are missing from this extract. */
11641 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11642 struct tg3_napi *tnapi = &tp->napi[i];
11643 free_irq(tnapi->irq_vec, tnapi);
11647 tg3_napi_disable(tp);
11649 tg3_free_consistent(tp);
/* Mirror of tg3_start: cancel the reset worker, stop traffic, timer and
 * hwmon, halt the chip and free rings under the full lock, then release
 * IRQs and DMA-consistent memory. */
11657 static void tg3_stop(struct tg3 *tp)
11661 tg3_reset_task_cancel(tp);
11662 tg3_netif_stop(tp);
11664 tg3_timer_stop(tp);
11666 tg3_hwmon_close(tp);
11670 tg3_full_lock(tp, 1);
11672 tg3_disable_ints(tp);
11674 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11675 tg3_free_rings(tp);
11676 tg3_flag_clear(tp, INIT_COMPLETE);
11678 tg3_full_unlock(tp);
11680 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11681 struct tg3_napi *tnapi = &tp->napi[i];
11682 free_irq(tnapi->irq_vec, tnapi);
11689 tg3_free_consistent(tp);
/* ndo_open: refuse while PCI error recovery is in progress, fetch any
 * needed firmware (downgrading EEE/TSO capability if the fetch fails,
 * restoring it when it succeeds), power the chip up and run the full
 * tg3_start() bring-up; on failure the auxiliary power state is frobbed
 * and the device is put back into D3hot. */
11692 static int tg3_open(struct net_device *dev)
11694 struct tg3 *tp = netdev_priv(dev);
11697 if (tp->pcierr_recovery) {
11698 netdev_err(dev, "Failed to open device. PCI error recovery "
11703 if (tp->fw_needed) {
11704 err = tg3_request_firmware(tp);
/* 57766: EEE depends on the firmware; toggle the capability flag to
 * match whether the download succeeded. */
11705 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11707 netdev_warn(tp->dev, "EEE capability disabled\n");
11708 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11709 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11710 netdev_warn(tp->dev, "EEE capability restored\n");
11711 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11713 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11717 netdev_warn(tp->dev, "TSO capability disabled\n");
11718 tg3_flag_clear(tp, TSO_CAPABLE);
11719 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11720 netdev_notice(tp->dev, "TSO capability restored\n");
11721 tg3_flag_set(tp, TSO_CAPABLE);
11725 tg3_carrier_off(tp);
11727 err = tg3_power_up(tp);
11731 tg3_full_lock(tp, 0);
11733 tg3_disable_ints(tp);
11734 tg3_flag_clear(tp, INIT_COMPLETE);
11736 tg3_full_unlock(tp);
11738 err = tg3_start(tp,
11739 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11742 tg3_frob_aux_power(tp, false);
11743 pci_set_power_state(tp->pdev, PCI_D3hot);
/* ndo_close: refuse during PCI error recovery; otherwise stop the
 * device (tg3_stop call is on a line missing from this extract),
 * prepare the power-down if the device is still present, and drop
 * carrier. */
11749 static int tg3_close(struct net_device *dev)
11751 struct tg3 *tp = netdev_priv(dev);
11753 if (tp->pcierr_recovery) {
11754 netdev_err(dev, "Failed to close device. PCI error recovery "
11761 if (pci_device_is_present(tp->pdev)) {
11762 tg3_power_down_prepare(tp);
11764 tg3_carrier_off(tp);
11769 static inline u64 get_stat64(tg3_stat64_t *val)
11771 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative CRC error count. On 5700/5701 copper PHYs the
 * count comes from the PHY's clear-on-read RXR counter (accumulated
 * into tp->phy_crc_errors under lock); on everything else it is the MAC
 * rx_fcs_errors hardware statistic. The lock acquire/release and 'val'
 * declaration sit on lines missing from this extract. */
11774 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11776 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11778 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11779 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11780 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11783 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11784 tg3_writephy(tp, MII_TG3_TEST1,
11785 val | MII_TG3_TEST1_CRC_EN)
11786 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11790 tp->phy_crc_errors += val;
11792 return tp->phy_crc_errors;
11795 return get_stat64(&hw_stats->rx_fcs_errors);
/* estats->member = previously saved value + current 64-bit hw counter.
 * Relies on 'estats', 'old_estats' and 'hw_stats' locals in the caller. */
11798 #define ESTAT_ADD(member) \
11799 estats->member = old_estats->member + \
11800 get_stat64(&hw_stats->member)
/* Populate the ethtool statistics structure: each field is the value
 * saved across the last reset (estats_prev) plus the live 64-bit
 * hardware counter. Purely additive, no side effects on hw_stats. */
11802 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11804 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11805 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* RX counters */
11807 ESTAT_ADD(rx_octets);
11808 ESTAT_ADD(rx_fragments);
11809 ESTAT_ADD(rx_ucast_packets);
11810 ESTAT_ADD(rx_mcast_packets);
11811 ESTAT_ADD(rx_bcast_packets);
11812 ESTAT_ADD(rx_fcs_errors);
11813 ESTAT_ADD(rx_align_errors);
11814 ESTAT_ADD(rx_xon_pause_rcvd);
11815 ESTAT_ADD(rx_xoff_pause_rcvd);
11816 ESTAT_ADD(rx_mac_ctrl_rcvd);
11817 ESTAT_ADD(rx_xoff_entered);
11818 ESTAT_ADD(rx_frame_too_long_errors);
11819 ESTAT_ADD(rx_jabbers);
11820 ESTAT_ADD(rx_undersize_packets);
11821 ESTAT_ADD(rx_in_length_errors);
11822 ESTAT_ADD(rx_out_length_errors);
11823 ESTAT_ADD(rx_64_or_less_octet_packets);
11824 ESTAT_ADD(rx_65_to_127_octet_packets);
11825 ESTAT_ADD(rx_128_to_255_octet_packets);
11826 ESTAT_ADD(rx_256_to_511_octet_packets);
11827 ESTAT_ADD(rx_512_to_1023_octet_packets);
11828 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11829 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11830 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11831 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11832 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* TX counters */
11834 ESTAT_ADD(tx_octets);
11835 ESTAT_ADD(tx_collisions);
11836 ESTAT_ADD(tx_xon_sent);
11837 ESTAT_ADD(tx_xoff_sent);
11838 ESTAT_ADD(tx_flow_control);
11839 ESTAT_ADD(tx_mac_errors);
11840 ESTAT_ADD(tx_single_collisions);
11841 ESTAT_ADD(tx_mult_collisions);
11842 ESTAT_ADD(tx_deferred);
11843 ESTAT_ADD(tx_excessive_collisions);
11844 ESTAT_ADD(tx_late_collisions);
11845 ESTAT_ADD(tx_collide_2times);
11846 ESTAT_ADD(tx_collide_3times);
11847 ESTAT_ADD(tx_collide_4times);
11848 ESTAT_ADD(tx_collide_5times);
11849 ESTAT_ADD(tx_collide_6times);
11850 ESTAT_ADD(tx_collide_7times);
11851 ESTAT_ADD(tx_collide_8times);
11852 ESTAT_ADD(tx_collide_9times);
11853 ESTAT_ADD(tx_collide_10times);
11854 ESTAT_ADD(tx_collide_11times);
11855 ESTAT_ADD(tx_collide_12times);
11856 ESTAT_ADD(tx_collide_13times);
11857 ESTAT_ADD(tx_collide_14times);
11858 ESTAT_ADD(tx_collide_15times);
11859 ESTAT_ADD(tx_ucast_packets);
11860 ESTAT_ADD(tx_mcast_packets);
11861 ESTAT_ADD(tx_bcast_packets);
11862 ESTAT_ADD(tx_carrier_sense_errors);
11863 ESTAT_ADD(tx_discards);
11864 ESTAT_ADD(tx_errors);
/* DMA / ring / interrupt counters */
11866 ESTAT_ADD(dma_writeq_full);
11867 ESTAT_ADD(dma_write_prioq_full);
11868 ESTAT_ADD(rxbds_empty);
11869 ESTAT_ADD(rx_discards);
11870 ESTAT_ADD(rx_errors);
11871 ESTAT_ADD(rx_threshold_hit);
11873 ESTAT_ADD(dma_readq_full);
11874 ESTAT_ADD(dma_read_prioq_full);
11875 ESTAT_ADD(tx_comp_queue_full);
11877 ESTAT_ADD(ring_set_send_prod_index);
11878 ESTAT_ADD(ring_status_update);
11879 ESTAT_ADD(nic_irqs);
11880 ESTAT_ADD(nic_avoided_irqs);
11881 ESTAT_ADD(nic_tx_threshold_hit);
11883 ESTAT_ADD(mbuf_lwm_thresh_hit);
11886 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11888 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11889 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11891 stats->rx_packets = old_stats->rx_packets +
11892 get_stat64(&hw_stats->rx_ucast_packets) +
11893 get_stat64(&hw_stats->rx_mcast_packets) +
11894 get_stat64(&hw_stats->rx_bcast_packets);
11896 stats->tx_packets = old_stats->tx_packets +
11897 get_stat64(&hw_stats->tx_ucast_packets) +
11898 get_stat64(&hw_stats->tx_mcast_packets) +
11899 get_stat64(&hw_stats->tx_bcast_packets);
11901 stats->rx_bytes = old_stats->rx_bytes +
11902 get_stat64(&hw_stats->rx_octets);
11903 stats->tx_bytes = old_stats->tx_bytes +
11904 get_stat64(&hw_stats->tx_octets);
11906 stats->rx_errors = old_stats->rx_errors +
11907 get_stat64(&hw_stats->rx_errors);
11908 stats->tx_errors = old_stats->tx_errors +
11909 get_stat64(&hw_stats->tx_errors) +
11910 get_stat64(&hw_stats->tx_mac_errors) +
11911 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11912 get_stat64(&hw_stats->tx_discards);
11914 stats->multicast = old_stats->multicast +
11915 get_stat64(&hw_stats->rx_mcast_packets);
11916 stats->collisions = old_stats->collisions +
11917 get_stat64(&hw_stats->tx_collisions);
11919 stats->rx_length_errors = old_stats->rx_length_errors +
11920 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11921 get_stat64(&hw_stats->rx_undersize_packets);
11923 stats->rx_frame_errors = old_stats->rx_frame_errors +
11924 get_stat64(&hw_stats->rx_align_errors);
11925 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11926 get_stat64(&hw_stats->tx_discards);
11927 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11928 get_stat64(&hw_stats->tx_carrier_sense_errors);
11930 stats->rx_crc_errors = old_stats->rx_crc_errors +
11931 tg3_calc_crc_errors(tp);
11933 stats->rx_missed_errors = old_stats->rx_missed_errors +
11934 get_stat64(&hw_stats->rx_discards);
11936 stats->rx_dropped = tp->rx_dropped;
11937 stats->tx_dropped = tp->tx_dropped;
11940 static int tg3_get_regs_len(struct net_device *dev)
11942 return TG3_REG_BLK_SIZE;
11945 static void tg3_get_regs(struct net_device *dev,
11946 struct ethtool_regs *regs, void *_p)
11948 struct tg3 *tp = netdev_priv(dev);
11952 memset(_p, 0, TG3_REG_BLK_SIZE);
11954 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11957 tg3_full_lock(tp, 0);
11959 tg3_dump_legacy_regs(tp, (u32 *)_p);
11961 tg3_full_unlock(tp);
11964 static int tg3_get_eeprom_len(struct net_device *dev)
11966 struct tg3 *tp = netdev_priv(dev);
11968 return tp->nvram_size;
11971 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11973 struct tg3 *tp = netdev_priv(dev);
11974 int ret, cpmu_restore = 0;
11976 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11979 if (tg3_flag(tp, NO_NVRAM))
11982 offset = eeprom->offset;
11986 eeprom->magic = TG3_EEPROM_MAGIC;
11988 /* Override clock, link aware and link idle modes */
11989 if (tg3_flag(tp, CPMU_PRESENT)) {
11990 cpmu_val = tr32(TG3_CPMU_CTRL);
11991 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11992 CPMU_CTRL_LINK_IDLE_MODE)) {
11993 tw32(TG3_CPMU_CTRL, cpmu_val &
11994 ~(CPMU_CTRL_LINK_AWARE_MODE |
11995 CPMU_CTRL_LINK_IDLE_MODE));
11999 tg3_override_clk(tp);
12002 /* adjustments to start on required 4 byte boundary */
12003 b_offset = offset & 3;
12004 b_count = 4 - b_offset;
12005 if (b_count > len) {
12006 /* i.e. offset=1 len=2 */
12009 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12012 memcpy(data, ((char *)&val) + b_offset, b_count);
12015 eeprom->len += b_count;
12018 /* read bytes up to the last 4 byte boundary */
12019 pd = &data[eeprom->len];
12020 for (i = 0; i < (len - (len & 3)); i += 4) {
12021 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12028 memcpy(pd + i, &val, 4);
12029 if (need_resched()) {
12030 if (signal_pending(current)) {
12041 /* read last bytes not ending on 4 byte boundary */
12042 pd = &data[eeprom->len];
12044 b_offset = offset + len - b_count;
12045 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12048 memcpy(pd, &val, b_count);
12049 eeprom->len += b_count;
12054 /* Restore clock, link aware and link idle modes */
12055 tg3_restore_clk(tp);
12057 tw32(TG3_CPMU_CTRL, cpmu_val);
12062 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12064 struct tg3 *tp = netdev_priv(dev);
12066 u32 offset, len, b_offset, odd_len;
12068 __be32 start = 0, end;
12070 if (tg3_flag(tp, NO_NVRAM) ||
12071 eeprom->magic != TG3_EEPROM_MAGIC)
12074 offset = eeprom->offset;
12077 if ((b_offset = (offset & 3))) {
12078 /* adjustments to start on required 4 byte boundary */
12079 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12090 /* adjustments to end on required 4 byte boundary */
12092 len = (len + 3) & ~3;
12093 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12099 if (b_offset || odd_len) {
12100 buf = kmalloc(len, GFP_KERNEL);
12104 memcpy(buf, &start, 4);
12106 memcpy(buf+len-4, &end, 4);
12107 memcpy(buf + b_offset, data, eeprom->len);
12110 ret = tg3_nvram_write_block(tp, offset, len, buf);
12118 static int tg3_get_link_ksettings(struct net_device *dev,
12119 struct ethtool_link_ksettings *cmd)
12121 struct tg3 *tp = netdev_priv(dev);
12122 u32 supported, advertising;
12124 if (tg3_flag(tp, USE_PHYLIB)) {
12125 struct phy_device *phydev;
12126 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12128 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12129 phy_ethtool_ksettings_get(phydev, cmd);
12134 supported = (SUPPORTED_Autoneg);
12136 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12137 supported |= (SUPPORTED_1000baseT_Half |
12138 SUPPORTED_1000baseT_Full);
12140 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12141 supported |= (SUPPORTED_100baseT_Half |
12142 SUPPORTED_100baseT_Full |
12143 SUPPORTED_10baseT_Half |
12144 SUPPORTED_10baseT_Full |
12146 cmd->base.port = PORT_TP;
12148 supported |= SUPPORTED_FIBRE;
12149 cmd->base.port = PORT_FIBRE;
12151 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12154 advertising = tp->link_config.advertising;
12155 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12156 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12157 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12158 advertising |= ADVERTISED_Pause;
12160 advertising |= ADVERTISED_Pause |
12161 ADVERTISED_Asym_Pause;
12163 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12164 advertising |= ADVERTISED_Asym_Pause;
12167 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12170 if (netif_running(dev) && tp->link_up) {
12171 cmd->base.speed = tp->link_config.active_speed;
12172 cmd->base.duplex = tp->link_config.active_duplex;
12173 ethtool_convert_legacy_u32_to_link_mode(
12174 cmd->link_modes.lp_advertising,
12175 tp->link_config.rmt_adv);
12177 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12178 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12179 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12181 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12184 cmd->base.speed = SPEED_UNKNOWN;
12185 cmd->base.duplex = DUPLEX_UNKNOWN;
12186 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12188 cmd->base.phy_address = tp->phy_addr;
12189 cmd->base.autoneg = tp->link_config.autoneg;
12193 static int tg3_set_link_ksettings(struct net_device *dev,
12194 const struct ethtool_link_ksettings *cmd)
12196 struct tg3 *tp = netdev_priv(dev);
12197 u32 speed = cmd->base.speed;
12200 if (tg3_flag(tp, USE_PHYLIB)) {
12201 struct phy_device *phydev;
12202 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12204 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12205 return phy_ethtool_ksettings_set(phydev, cmd);
12208 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12209 cmd->base.autoneg != AUTONEG_DISABLE)
12212 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12213 cmd->base.duplex != DUPLEX_FULL &&
12214 cmd->base.duplex != DUPLEX_HALF)
12217 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12218 cmd->link_modes.advertising);
12220 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12221 u32 mask = ADVERTISED_Autoneg |
12223 ADVERTISED_Asym_Pause;
12225 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12226 mask |= ADVERTISED_1000baseT_Half |
12227 ADVERTISED_1000baseT_Full;
12229 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12230 mask |= ADVERTISED_100baseT_Half |
12231 ADVERTISED_100baseT_Full |
12232 ADVERTISED_10baseT_Half |
12233 ADVERTISED_10baseT_Full |
12236 mask |= ADVERTISED_FIBRE;
12238 if (advertising & ~mask)
12241 mask &= (ADVERTISED_1000baseT_Half |
12242 ADVERTISED_1000baseT_Full |
12243 ADVERTISED_100baseT_Half |
12244 ADVERTISED_100baseT_Full |
12245 ADVERTISED_10baseT_Half |
12246 ADVERTISED_10baseT_Full);
12248 advertising &= mask;
12250 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12251 if (speed != SPEED_1000)
12254 if (cmd->base.duplex != DUPLEX_FULL)
12257 if (speed != SPEED_100 &&
12263 tg3_full_lock(tp, 0);
12265 tp->link_config.autoneg = cmd->base.autoneg;
12266 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12267 tp->link_config.advertising = (advertising |
12268 ADVERTISED_Autoneg);
12269 tp->link_config.speed = SPEED_UNKNOWN;
12270 tp->link_config.duplex = DUPLEX_UNKNOWN;
12272 tp->link_config.advertising = 0;
12273 tp->link_config.speed = speed;
12274 tp->link_config.duplex = cmd->base.duplex;
12277 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12279 tg3_warn_mgmt_link_flap(tp);
12281 if (netif_running(dev))
12282 tg3_setup_phy(tp, true);
12284 tg3_full_unlock(tp);
12289 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12291 struct tg3 *tp = netdev_priv(dev);
12293 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12294 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12295 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12298 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12300 struct tg3 *tp = netdev_priv(dev);
12302 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12303 wol->supported = WAKE_MAGIC;
12305 wol->supported = 0;
12307 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12308 wol->wolopts = WAKE_MAGIC;
12309 memset(&wol->sopass, 0, sizeof(wol->sopass));
12312 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12314 struct tg3 *tp = netdev_priv(dev);
12315 struct device *dp = &tp->pdev->dev;
12317 if (wol->wolopts & ~WAKE_MAGIC)
12319 if ((wol->wolopts & WAKE_MAGIC) &&
12320 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12323 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12325 if (device_may_wakeup(dp))
12326 tg3_flag_set(tp, WOL_ENABLE);
12328 tg3_flag_clear(tp, WOL_ENABLE);
12333 static u32 tg3_get_msglevel(struct net_device *dev)
12335 struct tg3 *tp = netdev_priv(dev);
12336 return tp->msg_enable;
12339 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12341 struct tg3 *tp = netdev_priv(dev);
12342 tp->msg_enable = value;
12345 static int tg3_nway_reset(struct net_device *dev)
12347 struct tg3 *tp = netdev_priv(dev);
12350 if (!netif_running(dev))
12353 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12356 tg3_warn_mgmt_link_flap(tp);
12358 if (tg3_flag(tp, USE_PHYLIB)) {
12359 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12361 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12365 spin_lock_bh(&tp->lock);
12367 tg3_readphy(tp, MII_BMCR, &bmcr);
12368 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12369 ((bmcr & BMCR_ANENABLE) ||
12370 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12371 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12375 spin_unlock_bh(&tp->lock);
12381 static void tg3_get_ringparam(struct net_device *dev,
12382 struct ethtool_ringparam *ering,
12383 struct kernel_ethtool_ringparam *kernel_ering,
12384 struct netlink_ext_ack *extack)
12386 struct tg3 *tp = netdev_priv(dev);
12388 ering->rx_max_pending = tp->rx_std_ring_mask;
12389 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12390 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12392 ering->rx_jumbo_max_pending = 0;
12394 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12396 ering->rx_pending = tp->rx_pending;
12397 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12398 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12400 ering->rx_jumbo_pending = 0;
12402 ering->tx_pending = tp->napi[0].tx_pending;
12405 static int tg3_set_ringparam(struct net_device *dev,
12406 struct ethtool_ringparam *ering,
12407 struct kernel_ethtool_ringparam *kernel_ering,
12408 struct netlink_ext_ack *extack)
12410 struct tg3 *tp = netdev_priv(dev);
12411 int i, irq_sync = 0, err = 0;
12412 bool reset_phy = false;
12414 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12415 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12416 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12417 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12418 (tg3_flag(tp, TSO_BUG) &&
12419 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12422 if (netif_running(dev)) {
12424 tg3_netif_stop(tp);
12428 tg3_full_lock(tp, irq_sync);
12430 tp->rx_pending = ering->rx_pending;
12432 if (tg3_flag(tp, MAX_RXPEND_64) &&
12433 tp->rx_pending > 63)
12434 tp->rx_pending = 63;
12436 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12437 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12439 for (i = 0; i < tp->irq_max; i++)
12440 tp->napi[i].tx_pending = ering->tx_pending;
12442 if (netif_running(dev)) {
12443 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12444 /* Reset PHY to avoid PHY lock up */
12445 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12446 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12447 tg3_asic_rev(tp) == ASIC_REV_5720)
12450 err = tg3_restart_hw(tp, reset_phy);
12452 tg3_netif_start(tp);
12455 tg3_full_unlock(tp);
12457 if (irq_sync && !err)
12463 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12465 struct tg3 *tp = netdev_priv(dev);
12467 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12469 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12470 epause->rx_pause = 1;
12472 epause->rx_pause = 0;
12474 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12475 epause->tx_pause = 1;
12477 epause->tx_pause = 0;
12480 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12482 struct tg3 *tp = netdev_priv(dev);
12484 bool reset_phy = false;
12486 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12487 tg3_warn_mgmt_link_flap(tp);
12489 if (tg3_flag(tp, USE_PHYLIB)) {
12490 struct phy_device *phydev;
12492 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12494 if (!phy_validate_pause(phydev, epause))
12497 tp->link_config.flowctrl = 0;
12498 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12499 if (epause->rx_pause) {
12500 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12502 if (epause->tx_pause) {
12503 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12505 } else if (epause->tx_pause) {
12506 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12509 if (epause->autoneg)
12510 tg3_flag_set(tp, PAUSE_AUTONEG);
12512 tg3_flag_clear(tp, PAUSE_AUTONEG);
12514 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12515 if (phydev->autoneg) {
12516 /* phy_set_asym_pause() will
12517 * renegotiate the link to inform our
12518 * link partner of our flow control
12519 * settings, even if the flow control
12520 * is forced. Let tg3_adjust_link()
12521 * do the final flow control setup.
12526 if (!epause->autoneg)
12527 tg3_setup_flow_control(tp, 0, 0);
12532 if (netif_running(dev)) {
12533 tg3_netif_stop(tp);
12537 tg3_full_lock(tp, irq_sync);
12539 if (epause->autoneg)
12540 tg3_flag_set(tp, PAUSE_AUTONEG);
12542 tg3_flag_clear(tp, PAUSE_AUTONEG);
12543 if (epause->rx_pause)
12544 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12546 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12547 if (epause->tx_pause)
12548 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12550 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12552 if (netif_running(dev)) {
12553 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12554 /* Reset PHY to avoid PHY lock up */
12555 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12556 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12557 tg3_asic_rev(tp) == ASIC_REV_5720)
12560 err = tg3_restart_hw(tp, reset_phy);
12562 tg3_netif_start(tp);
12565 tg3_full_unlock(tp);
12568 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12573 static int tg3_get_sset_count(struct net_device *dev, int sset)
12577 return TG3_NUM_TEST;
12579 return TG3_NUM_STATS;
12581 return -EOPNOTSUPP;
12585 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12586 u32 *rules __always_unused)
12588 struct tg3 *tp = netdev_priv(dev);
12590 if (!tg3_flag(tp, SUPPORT_MSIX))
12591 return -EOPNOTSUPP;
12593 switch (info->cmd) {
12594 case ETHTOOL_GRXRINGS:
12595 if (netif_running(tp->dev))
12596 info->data = tp->rxq_cnt;
12598 info->data = num_online_cpus();
12599 if (info->data > TG3_RSS_MAX_NUM_QS)
12600 info->data = TG3_RSS_MAX_NUM_QS;
12606 return -EOPNOTSUPP;
12610 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12613 struct tg3 *tp = netdev_priv(dev);
12615 if (tg3_flag(tp, SUPPORT_MSIX))
12616 size = TG3_RSS_INDIR_TBL_SIZE;
12621 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12623 struct tg3 *tp = netdev_priv(dev);
12627 *hfunc = ETH_RSS_HASH_TOP;
12631 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12632 indir[i] = tp->rss_ind_tbl[i];
12637 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12640 struct tg3 *tp = netdev_priv(dev);
12643 /* We require at least one supported parameter to be changed and no
12644 * change in any of the unsupported parameters
12647 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12648 return -EOPNOTSUPP;
12653 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12654 tp->rss_ind_tbl[i] = indir[i];
12656 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12659 /* It is legal to write the indirection
12660 * table while the device is running.
12662 tg3_full_lock(tp, 0);
12663 tg3_rss_write_indir_tbl(tp);
12664 tg3_full_unlock(tp);
12669 static void tg3_get_channels(struct net_device *dev,
12670 struct ethtool_channels *channel)
12672 struct tg3 *tp = netdev_priv(dev);
12673 u32 deflt_qs = netif_get_num_default_rss_queues();
12675 channel->max_rx = tp->rxq_max;
12676 channel->max_tx = tp->txq_max;
12678 if (netif_running(dev)) {
12679 channel->rx_count = tp->rxq_cnt;
12680 channel->tx_count = tp->txq_cnt;
12683 channel->rx_count = tp->rxq_req;
12685 channel->rx_count = min(deflt_qs, tp->rxq_max);
12688 channel->tx_count = tp->txq_req;
12690 channel->tx_count = min(deflt_qs, tp->txq_max);
12694 static int tg3_set_channels(struct net_device *dev,
12695 struct ethtool_channels *channel)
12697 struct tg3 *tp = netdev_priv(dev);
12699 if (!tg3_flag(tp, SUPPORT_MSIX))
12700 return -EOPNOTSUPP;
12702 if (channel->rx_count > tp->rxq_max ||
12703 channel->tx_count > tp->txq_max)
12706 tp->rxq_req = channel->rx_count;
12707 tp->txq_req = channel->tx_count;
12709 if (!netif_running(dev))
12714 tg3_carrier_off(tp);
12716 tg3_start(tp, true, false, false);
12721 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12723 switch (stringset) {
12725 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12728 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12731 WARN_ON(1); /* we need a WARN() */
12736 static int tg3_set_phys_id(struct net_device *dev,
12737 enum ethtool_phys_id_state state)
12739 struct tg3 *tp = netdev_priv(dev);
12742 case ETHTOOL_ID_ACTIVE:
12743 return 1; /* cycle on/off once per second */
12745 case ETHTOOL_ID_ON:
12746 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12747 LED_CTRL_1000MBPS_ON |
12748 LED_CTRL_100MBPS_ON |
12749 LED_CTRL_10MBPS_ON |
12750 LED_CTRL_TRAFFIC_OVERRIDE |
12751 LED_CTRL_TRAFFIC_BLINK |
12752 LED_CTRL_TRAFFIC_LED);
12755 case ETHTOOL_ID_OFF:
12756 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12757 LED_CTRL_TRAFFIC_OVERRIDE);
12760 case ETHTOOL_ID_INACTIVE:
12761 tw32(MAC_LED_CTRL, tp->led_ctrl);
12768 static void tg3_get_ethtool_stats(struct net_device *dev,
12769 struct ethtool_stats *estats, u64 *tmp_stats)
12771 struct tg3 *tp = netdev_priv(dev);
12774 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12776 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12779 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12783 u32 offset = 0, len = 0;
12786 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12789 if (magic == TG3_EEPROM_MAGIC) {
12790 for (offset = TG3_NVM_DIR_START;
12791 offset < TG3_NVM_DIR_END;
12792 offset += TG3_NVM_DIRENT_SIZE) {
12793 if (tg3_nvram_read(tp, offset, &val))
12796 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12797 TG3_NVM_DIRTYPE_EXTVPD)
12801 if (offset != TG3_NVM_DIR_END) {
12802 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12803 if (tg3_nvram_read(tp, offset + 4, &offset))
12806 offset = tg3_nvram_logical_addr(tp, offset);
12809 if (!offset || !len) {
12810 offset = TG3_NVM_VPD_OFF;
12811 len = TG3_NVM_VPD_LEN;
12814 buf = kmalloc(len, GFP_KERNEL);
12818 for (i = 0; i < len; i += 4) {
12819 /* The data is in little-endian format in NVRAM.
12820 * Use the big-endian read routines to preserve
12821 * the byte order as it exists in NVRAM.
12823 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12828 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12840 #define NVRAM_TEST_SIZE 0x100
12841 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12842 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12843 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12844 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12845 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12846 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12847 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12848 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12850 static int tg3_test_nvram(struct tg3 *tp)
12854 int i, j, k, err = 0, size;
12857 if (tg3_flag(tp, NO_NVRAM))
12860 if (tg3_nvram_read(tp, 0, &magic) != 0)
12863 if (magic == TG3_EEPROM_MAGIC)
12864 size = NVRAM_TEST_SIZE;
12865 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12866 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12867 TG3_EEPROM_SB_FORMAT_1) {
12868 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12869 case TG3_EEPROM_SB_REVISION_0:
12870 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12872 case TG3_EEPROM_SB_REVISION_2:
12873 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12875 case TG3_EEPROM_SB_REVISION_3:
12876 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12878 case TG3_EEPROM_SB_REVISION_4:
12879 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12881 case TG3_EEPROM_SB_REVISION_5:
12882 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12884 case TG3_EEPROM_SB_REVISION_6:
12885 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12892 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12893 size = NVRAM_SELFBOOT_HW_SIZE;
12897 buf = kmalloc(size, GFP_KERNEL);
12902 for (i = 0, j = 0; i < size; i += 4, j++) {
12903 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12910 /* Selfboot format */
12911 magic = be32_to_cpu(buf[0]);
12912 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12913 TG3_EEPROM_MAGIC_FW) {
12914 u8 *buf8 = (u8 *) buf, csum8 = 0;
12916 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12917 TG3_EEPROM_SB_REVISION_2) {
12918 /* For rev 2, the csum doesn't include the MBA. */
12919 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12921 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12924 for (i = 0; i < size; i++)
12937 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12938 TG3_EEPROM_MAGIC_HW) {
12939 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12940 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12941 u8 *buf8 = (u8 *) buf;
12943 /* Separate the parity bits and the data bytes. */
12944 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12945 if ((i == 0) || (i == 8)) {
12949 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12950 parity[k++] = buf8[i] & msk;
12952 } else if (i == 16) {
12956 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12957 parity[k++] = buf8[i] & msk;
12960 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12961 parity[k++] = buf8[i] & msk;
12964 data[j++] = buf8[i];
12968 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12969 u8 hw8 = hweight8(data[i]);
12971 if ((hw8 & 0x1) && parity[i])
12973 else if (!(hw8 & 0x1) && !parity[i])
12982 /* Bootstrap checksum at offset 0x10 */
12983 csum = calc_crc((unsigned char *) buf, 0x10);
12984 if (csum != le32_to_cpu(buf[0x10/4]))
12987 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12988 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12989 if (csum != le32_to_cpu(buf[0xfc/4]))
12994 buf = tg3_vpd_readblock(tp, &len);
12998 err = pci_vpd_check_csum(buf, len);
12999 /* go on if no checksum found */
13007 #define TG3_SERDES_TIMEOUT_SEC 2
13008 #define TG3_COPPER_TIMEOUT_SEC 6
13010 static int tg3_test_link(struct tg3 *tp)
13014 if (!netif_running(tp->dev))
13017 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13018 max = TG3_SERDES_TIMEOUT_SEC;
13020 max = TG3_COPPER_TIMEOUT_SEC;
13022 for (i = 0; i < max; i++) {
13026 if (msleep_interruptible(1000))
13033 /* Only test the commonly used registers */
13034 static int tg3_test_registers(struct tg3 *tp)
13036 int i, is_5705, is_5750;
13037 u32 offset, read_mask, write_mask, val, save_val, read_val;
13041 #define TG3_FL_5705 0x1
13042 #define TG3_FL_NOT_5705 0x2
13043 #define TG3_FL_NOT_5788 0x4
13044 #define TG3_FL_NOT_5750 0x8
13048 /* MAC Control Registers */
13049 { MAC_MODE, TG3_FL_NOT_5705,
13050 0x00000000, 0x00ef6f8c },
13051 { MAC_MODE, TG3_FL_5705,
13052 0x00000000, 0x01ef6b8c },
13053 { MAC_STATUS, TG3_FL_NOT_5705,
13054 0x03800107, 0x00000000 },
13055 { MAC_STATUS, TG3_FL_5705,
13056 0x03800100, 0x00000000 },
13057 { MAC_ADDR_0_HIGH, 0x0000,
13058 0x00000000, 0x0000ffff },
13059 { MAC_ADDR_0_LOW, 0x0000,
13060 0x00000000, 0xffffffff },
13061 { MAC_RX_MTU_SIZE, 0x0000,
13062 0x00000000, 0x0000ffff },
13063 { MAC_TX_MODE, 0x0000,
13064 0x00000000, 0x00000070 },
13065 { MAC_TX_LENGTHS, 0x0000,
13066 0x00000000, 0x00003fff },
13067 { MAC_RX_MODE, TG3_FL_NOT_5705,
13068 0x00000000, 0x000007fc },
13069 { MAC_RX_MODE, TG3_FL_5705,
13070 0x00000000, 0x000007dc },
13071 { MAC_HASH_REG_0, 0x0000,
13072 0x00000000, 0xffffffff },
13073 { MAC_HASH_REG_1, 0x0000,
13074 0x00000000, 0xffffffff },
13075 { MAC_HASH_REG_2, 0x0000,
13076 0x00000000, 0xffffffff },
13077 { MAC_HASH_REG_3, 0x0000,
13078 0x00000000, 0xffffffff },
13080 /* Receive Data and Receive BD Initiator Control Registers. */
13081 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13082 0x00000000, 0xffffffff },
13083 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13084 0x00000000, 0xffffffff },
13085 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13086 0x00000000, 0x00000003 },
13087 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13088 0x00000000, 0xffffffff },
13089 { RCVDBDI_STD_BD+0, 0x0000,
13090 0x00000000, 0xffffffff },
13091 { RCVDBDI_STD_BD+4, 0x0000,
13092 0x00000000, 0xffffffff },
13093 { RCVDBDI_STD_BD+8, 0x0000,
13094 0x00000000, 0xffff0002 },
13095 { RCVDBDI_STD_BD+0xc, 0x0000,
13096 0x00000000, 0xffffffff },
13098 /* Receive BD Initiator Control Registers. */
13099 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13100 0x00000000, 0xffffffff },
13101 { RCVBDI_STD_THRESH, TG3_FL_5705,
13102 0x00000000, 0x000003ff },
13103 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13104 0x00000000, 0xffffffff },
13106 /* Host Coalescing Control Registers. */
13107 { HOSTCC_MODE, TG3_FL_NOT_5705,
13108 0x00000000, 0x00000004 },
13109 { HOSTCC_MODE, TG3_FL_5705,
13110 0x00000000, 0x000000f6 },
13111 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13112 0x00000000, 0xffffffff },
13113 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13114 0x00000000, 0x000003ff },
13115 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13116 0x00000000, 0xffffffff },
13117 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13118 0x00000000, 0x000003ff },
13119 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13120 0x00000000, 0xffffffff },
13121 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13122 0x00000000, 0x000000ff },
13123 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13126 0x00000000, 0x000000ff },
13127 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13130 0x00000000, 0xffffffff },
13131 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13134 0x00000000, 0x000000ff },
13135 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13138 0x00000000, 0x000000ff },
13139 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13142 0x00000000, 0xffffffff },
13143 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13146 0x00000000, 0xffffffff },
13147 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13150 0xffffffff, 0x00000000 },
13151 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13152 0xffffffff, 0x00000000 },
13154 /* Buffer Manager Control Registers. */
13155 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13156 0x00000000, 0x007fff80 },
13157 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13158 0x00000000, 0x007fffff },
13159 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13160 0x00000000, 0x0000003f },
13161 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13162 0x00000000, 0x000001ff },
13163 { BUFMGR_MB_HIGH_WATER, 0x0000,
13164 0x00000000, 0x000001ff },
13165 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13166 0xffffffff, 0x00000000 },
13167 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13168 0xffffffff, 0x00000000 },
13170 /* Mailbox Registers */
13171 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13172 0x00000000, 0x000001ff },
13173 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13174 0x00000000, 0x000001ff },
13175 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13176 0x00000000, 0x000007ff },
13177 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13178 0x00000000, 0x000001ff },
13180 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13183 is_5705 = is_5750 = 0;
13184 if (tg3_flag(tp, 5705_PLUS)) {
13186 if (tg3_flag(tp, 5750_PLUS))
13190 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13191 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13194 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13197 if (tg3_flag(tp, IS_5788) &&
13198 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13201 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13204 offset = (u32) reg_tbl[i].offset;
13205 read_mask = reg_tbl[i].read_mask;
13206 write_mask = reg_tbl[i].write_mask;
13208 /* Save the original register content */
13209 save_val = tr32(offset);
13211 /* Determine the read-only value. */
13212 read_val = save_val & read_mask;
13214 /* Write zero to the register, then make sure the read-only bits
13215 * are not changed and the read/write bits are all zeros.
13219 val = tr32(offset);
13221 /* Test the read-only and read/write bits. */
13222 if (((val & read_mask) != read_val) || (val & write_mask))
13225 /* Write ones to all the bits defined by RdMask and WrMask, then
13226 * make sure the read-only bits are not changed and the
13227 * read/write bits are all ones.
13229 tw32(offset, read_mask | write_mask);
13231 val = tr32(offset);
13233 /* Test the read-only bits. */
13234 if ((val & read_mask) != read_val)
13237 /* Test the read/write bits. */
13238 if ((val & write_mask) != write_mask)
13241 tw32(offset, save_val);
13247 if (netif_msg_hw(tp))
13248 netdev_err(tp->dev,
13249 "Register test failed at offset %x\n", offset);
13250 tw32(offset, save_val);
/* Pattern-test a window of on-chip memory.
 * Writes each entry of test_pattern[] to every 32-bit word in
 * [offset, offset + len) via the indirect memory interface, reads it
 * back, and compares.  Used by tg3_test_memory() for the ethtool
 * offline self-test.  NOTE(review): error/return path not visible in
 * this chunk — presumably returns non-zero on mismatch; confirm.
 */
13254 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13256 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13260 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		/* Step one 32-bit word at a time across the region. */
13261 		for (j = 0; j < len; j += 4) {
13264 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13265 			tg3_read_mem(tp, offset + j, &val);
			/* Read-back must match what was just written. */
13266 			if (val != test_pattern[i])
/* Ethtool offline self-test of the chip's internal SRAM regions.
 * Selects a { offset, len } table matching the ASIC generation, then
 * runs tg3_do_mem_test() over each entry until the 0xffffffff offset
 * sentinel terminates the table.
 */
13273 static int tg3_test_memory(struct tg3 *tp)
13275 	static struct mem_entry {
	/* Per-generation tables of testable memory windows.
	 * Each entry is { chip-internal offset, length in bytes };
	 * { 0xffffffff, 0 } marks end of table.
	 */
13278 	} mem_tbl_570x[] = {
13279 		{ 0x00000000, 0x00b50},
13280 		{ 0x00002000, 0x1c000},
13281 		{ 0xffffffff, 0x00000}
13282 	}, mem_tbl_5705[] = {
13283 		{ 0x00000100, 0x0000c},
13284 		{ 0x00000200, 0x00008},
13285 		{ 0x00004000, 0x00800},
13286 		{ 0x00006000, 0x01000},
13287 		{ 0x00008000, 0x02000},
13288 		{ 0x00010000, 0x0e000},
13289 		{ 0xffffffff, 0x00000}
13290 	}, mem_tbl_5755[] = {
13291 		{ 0x00000200, 0x00008},
13292 		{ 0x00004000, 0x00800},
13293 		{ 0x00006000, 0x00800},
13294 		{ 0x00008000, 0x02000},
13295 		{ 0x00010000, 0x0c000},
13296 		{ 0xffffffff, 0x00000}
13297 	}, mem_tbl_5906[] = {
13298 		{ 0x00000200, 0x00008},
13299 		{ 0x00004000, 0x00400},
13300 		{ 0x00006000, 0x00400},
13301 		{ 0x00008000, 0x01000},
13302 		{ 0x00010000, 0x01000},
13303 		{ 0xffffffff, 0x00000}
13304 	}, mem_tbl_5717[] = {
13305 		{ 0x00000200, 0x00008},
13306 		{ 0x00010000, 0x0a000},
13307 		{ 0x00020000, 0x13c00},
13308 		{ 0xffffffff, 0x00000}
13309 	}, mem_tbl_57765[] = {
13310 		{ 0x00000200, 0x00008},
13311 		{ 0x00004000, 0x00800},
13312 		{ 0x00006000, 0x09800},
13313 		{ 0x00010000, 0x0a000},
13314 		{ 0xffffffff, 0x00000}
13316 	struct mem_entry *mem_tbl;
	/* Pick the table for this ASIC, newest families first. */
13320 	if (tg3_flag(tp, 5717_PLUS))
13321 		mem_tbl = mem_tbl_5717;
13322 	else if (tg3_flag(tp, 57765_CLASS) ||
13323 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13324 		mem_tbl = mem_tbl_57765;
13325 	else if (tg3_flag(tp, 5755_PLUS))
13326 		mem_tbl = mem_tbl_5755;
13327 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13328 		mem_tbl = mem_tbl_5906;
13329 	else if (tg3_flag(tp, 5705_PLUS))
13330 		mem_tbl = mem_tbl_5705;
	/* Fallback: original 570x family. */
13332 		mem_tbl = mem_tbl_570x;
13334 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13335 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13343 #define TG3_TSO_MSS 500
13345 #define TG3_TSO_IP_HDR_LEN 20
13346 #define TG3_TSO_TCP_HDR_LEN 20
13347 #define TG3_TSO_TCP_OPT_LEN 12
/* Canned payload template for the TSO loopback test: an IPv4 header
 * (ver 4/IHL 5, DF set, TTL 0x40, proto 6 = TCP, 10.0.0.1 -> 10.0.0.2)
 * followed by a TCP header with a 12-byte options area.  Zeroed fields
 * (IP total length, checksums) are patched at run time by
 * tg3_run_loopback().
 */
13349 static const u8 tg3_tso_header[] = {
13351 	0x45, 0x00, 0x00, 0x00,	/* IP: ver/IHL, TOS, tot_len (patched) */
13352 	0x00, 0x00, 0x40, 0x00,	/* IP: id, frag flags (DF) */
13353 	0x40, 0x06, 0x00, 0x00,	/* IP: TTL, proto TCP, csum (patched) */
13354 	0x0a, 0x00, 0x00, 0x01,	/* IP: src 10.0.0.1 */
13355 	0x0a, 0x00, 0x00, 0x02,	/* IP: dst 10.0.0.2 */
13356 	0x0d, 0x00, 0xe0, 0x00,	/* TCP: src/dst ports */
13357 	0x00, 0x00, 0x01, 0x00,	/* TCP: seq */
13358 	0x00, 0x00, 0x02, 0x00,	/* TCP: ack */
13359 	0x80, 0x10, 0x10, 0x00,	/* TCP: doff=8 (32B hdr), flags, window */
13360 	0x14, 0x09, 0x00, 0x00,	/* TCP: csum, urg ptr */
13361 	0x01, 0x01, 0x08, 0x0a,	/* TCP opts: NOP, NOP, timestamp */
13362 	0x11, 0x11, 0x11, 0x11,	/* timestamp value (filler) */
13363 	0x11, 0x11, 0x11, 0x11,	/* timestamp echo (filler) */
/* Transmit one self-addressed test frame and verify it comes back on
 * the receive ring, byte for byte.  Caller must already have put the
 * device into a loopback mode (MAC, internal PHY, or external).
 *
 * @pktsz:        on-wire packet size to exercise (standard or jumbo)
 * @tso_loopback: when true, build a TSO super-frame from
 *                tg3_tso_header and verify the HW segments it into
 *                num_pkts MSS-sized frames with good TCP checksums
 *
 * NOTE(review): this chunk omits several lines (skb NULL check, error
 * returns, delays) — ordering of the visible DMA/mailbox operations is
 * load-bearing; do not reorder.
 */
13366 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13368 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13369 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13371 	struct sk_buff *skb;
13372 	u8 *tx_data, *rx_data;
13374 	int num_pkts, tx_len, rx_len, i, err;
13375 	struct tg3_rx_buffer_desc *desc;
13376 	struct tg3_napi *tnapi, *rnapi;
13377 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
	/* Default both tx and rx to vector 0; with RSS/TSS the first
	 * data queue lives on vector 1 instead.
	 */
13379 	tnapi = &tp->napi[0];
13380 	rnapi = &tp->napi[0];
13381 	if (tp->irq_cnt > 1) {
13382 		if (tg3_flag(tp, ENABLE_RSS))
13383 			rnapi = &tp->napi[1];
13384 		if (tg3_flag(tp, ENABLE_TSS))
13385 			tnapi = &tp->napi[1];
13387 	coal_now = tnapi->coal_now | rnapi->coal_now;
	/* Build the test frame: dst MAC = our own address (loopback),
	 * 8 bytes of zeros for src MAC + ethertype area.
	 */
13392 	skb = netdev_alloc_skb(tp->dev, tx_len);
13396 	tx_data = skb_put(skb, tx_len);
13397 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13398 	memset(tx_data + ETH_ALEN, 0x0, 8);
	/* Open the MAC RX filter wide enough for this frame + FCS. */
13400 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13402 	if (tso_loopback) {
13403 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13405 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13406 			      TG3_TSO_TCP_OPT_LEN;
13408 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13409 		       sizeof(tg3_tso_header));
		/* Expected number of segmented frames = payload / MSS,
		 * rounded up.
		 */
13412 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13413 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13415 		/* Set the total length field in the IP header */
13416 		iph->tot_len = htons((u16)(mss + hdr_len));
13418 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13419 			      TXD_FLAG_CPU_POST_DMA);
13421 		if (tg3_flag(tp, HW_TSO_1) ||
13422 		    tg3_flag(tp, HW_TSO_2) ||
13423 		    tg3_flag(tp, HW_TSO_3)) {
13425 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13426 			th = (struct tcphdr *)&tx_data[val];
13429 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		/* Each HW TSO generation encodes the header length into
		 * mss/base_flags differently; bit layouts below follow
		 * the per-generation TX BD format.
		 */
13431 		if (tg3_flag(tp, HW_TSO_3)) {
13432 			mss |= (hdr_len & 0xc) << 12;
13433 			if (hdr_len & 0x10)
13434 				base_flags |= 0x00000010;
13435 			base_flags |= (hdr_len & 0x3e0) << 5;
13436 		} else if (tg3_flag(tp, HW_TSO_2))
13437 			mss |= hdr_len << 9;
13438 		else if (tg3_flag(tp, HW_TSO_1) ||
13439 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13440 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13442 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		/* Payload (to be pattern-filled) starts after the TSO
		 * headers ...
		 */
13445 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
		/* ... or right after the Ethernet header otherwise. */
13448 		data_off = ETH_HLEN;
13450 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13451 	    tx_len > VLAN_ETH_FRAME_LEN)
13452 		base_flags |= TXD_FLAG_JMB_PKT;
	/* Deterministic byte pattern so RX contents can be verified. */
13455 	for (i = data_off; i < tx_len; i++)
13456 		tx_data[i] = (u8) (i & 0xff);
13458 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13459 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13460 		dev_kfree_skb(skb);
13464 	val = tnapi->tx_prod;
13465 	tnapi->tx_buffers[val].skb = skb;
13466 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
	/* Force a coalescing tick so the status block is fresh before
	 * we sample the starting RX producer index.
	 */
13468 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13473 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13475 	budget = tg3_tx_avail(tnapi);
13476 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13477 			    base_flags | TXD_FLAG_END, mss, 0)) {
13478 		tnapi->tx_buffers[val].skb = NULL;
13479 		dev_kfree_skb(skb);
13485 	/* Sync BD data before updating mailbox */
	/* Ring the TX doorbell; the read-back flushes the posted write. */
13488 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13489 	tr32_mailbox(tnapi->prodmbox);
13493 	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
13494 	for (i = 0; i < 35; i++) {
13495 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		/* Done when the frame has been consumed on TX and all
		 * expected frames have appeared on RX.
		 */
13500 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13501 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13502 		if ((tx_idx == tnapi->tx_prod) &&
13503 		    (rx_idx == (rx_start_idx + num_pkts)))
13507 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13508 	dev_kfree_skb(skb);
13510 	if (tx_idx != tnapi->tx_prod)
13513 	if (rx_idx != rx_start_idx + num_pkts)
	/* Validate every received descriptor/frame. */
13517 	while (rx_idx != rx_start_idx) {
13518 		desc = &rnapi->rx_rcb[rx_start_idx++];
13519 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13520 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		/* Any RX error other than the benign odd-nibble MII
		 * indication fails the test.
		 */
13522 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13523 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13526 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13529 		if (!tso_loopback) {
13530 			if (rx_len != tx_len)
			/* Frame must land on the ring matching its size. */
13533 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13534 				if (opaque_key != RXD_OPAQUE_RING_STD)
13537 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
		/* TSO: HW-computed TCP checksum must verify (0xffff). */
13540 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13541 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13542 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13546 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13547 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13548 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13550 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13551 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13552 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
		/* Make the DMA'd buffer visible to the CPU before the
		 * byte-compare loop.
		 */
13557 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13560 		rx_data += TG3_RX_OFFSET(tp);
13561 		for (i = data_off; i < rx_len; i++, val++) {
13562 			if (*(rx_data + i) != (u8) (val & 0xff))
13569 	/* tg3_free_rings will unmap and free the rx_data */
13574 #define TG3_STD_LOOPBACK_FAILED 1
13575 #define TG3_JMB_LOOPBACK_FAILED 2
13576 #define TG3_TSO_LOOPBACK_FAILED 4
13577 #define TG3_LOOPBACK_FAILED \
13578 (TG3_STD_LOOPBACK_FAILED | \
13579 TG3_JMB_LOOPBACK_FAILED | \
13580 TG3_TSO_LOOPBACK_FAILED)
/* Run the full loopback suite (MAC, internal PHY, optional external
 * PHY) and record per-mode failure bitmasks in data[].  EEE is masked
 * off for the duration (restored at the end) since it interferes with
 * loopback.  Returns -EIO if any sub-test failed, 0 otherwise.
 */
13582 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13586 	u32 jmb_pkt_sz = 9000;
	/* Cap jumbo test size to what the DMA engine can handle. */
13589 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
	/* Temporarily hide EEE capability; restored before returning. */
13591 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13592 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
	/* Interface down: report everything failed without touching HW. */
13594 	if (!netif_running(tp->dev)) {
13595 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13596 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13598 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
	/* Reinitialize the chip before testing; a failed reset also
	 * fails every sub-test.
	 */
13602 	err = tg3_reset_hw(tp, true);
13604 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13607 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 	if (tg3_flag(tp, ENABLE_RSS)) {
13614 		/* Reroute all rx packets to the 1st queue */
13615 		for (i = MAC_RSS_INDIR_TBL_0;
13616 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13620 	/* HW errata - mac loopback fails in some cases on 5780.
13621 	 * Normal traffic and PHY loopback are not affected by
13622 	 * errata. Also, the MAC loopback test is deprecated for
13623 	 * all newer ASIC revisions.
	 */
13625 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13626 	    !tg3_flag(tp, CPMU_PRESENT)) {
13627 		tg3_mac_loopback(tp, true);
13629 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13630 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13632 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13633 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13634 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13636 		tg3_mac_loopback(tp, false);
	/* Internal PHY loopback — only for copper PHYs driven directly
	 * (not via phylib).
	 */
13639 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13640 	    !tg3_flag(tp, USE_PHYLIB)) {
13643 		tg3_phy_lpbk_set(tp, 0, false);
13645 		/* Wait for link */
13646 		for (i = 0; i < 100; i++) {
13647 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13652 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13653 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13654 		if (tg3_flag(tp, TSO_CAPABLE) &&
13655 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13656 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13657 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13658 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13659 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
		/* External loopback (requires a loopback plug). */
13662 			tg3_phy_lpbk_set(tp, 0, true);
13664 			/* All link indications report up, but the hardware
13665 			 * isn't really ready for about 20 msec. Double it
			 */
13670 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13671 				data[TG3_EXT_LOOPB_TEST] |=
13672 							TG3_STD_LOOPBACK_FAILED;
13673 			if (tg3_flag(tp, TSO_CAPABLE) &&
13674 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13675 				data[TG3_EXT_LOOPB_TEST] |=
13676 							TG3_TSO_LOOPBACK_FAILED;
13677 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13678 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13679 				data[TG3_EXT_LOOPB_TEST] |=
13680 							TG3_JMB_LOOPBACK_FAILED;
13683 		/* Re-enable gphy autopowerdown. */
13684 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13685 			tg3_phy_toggle_apd(tp, true);
	/* Any recorded failure bit means the overall test failed. */
13688 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13689 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
	/* Restore the EEE capability flag masked off at entry. */
13692 	tp->phy_flags |= eee_cap;
/* ethtool .self_test handler.  Runs the online tests (NVRAM, link)
 * always, and — when ETH_TEST_FL_OFFLINE is requested — halts the
 * device to run the register, memory, loopback and interrupt tests,
 * then restarts it.  Per-test pass/fail lands in data[]; overall
 * failure is signalled via ETH_TEST_FL_FAILED in etest->flags.
 */
13697 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13700 	struct tg3 *tp = netdev_priv(dev);
13701 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
	/* Device asleep: try to power it up; on failure mark every
	 * test failed and bail.
	 */
13703 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13704 		if (tg3_power_up(tp)) {
13705 			etest->flags |= ETH_TEST_FL_FAILED;
13706 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13709 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13712 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
	/* Online tests first — safe while traffic flows. */
13714 	if (tg3_test_nvram(tp) != 0) {
13715 		etest->flags |= ETH_TEST_FL_FAILED;
13716 		data[TG3_NVRAM_TEST] = 1;
	/* Link test is skipped for external loopback (link is a plug). */
13718 	if (!doextlpbk && tg3_test_link(tp)) {
13719 		etest->flags |= ETH_TEST_FL_FAILED;
13720 		data[TG3_LINK_TEST] = 1;
13722 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13723 		int err, err2 = 0, irq_sync = 0;
		/* Quiesce the interface before destructive tests. */
13725 		if (netif_running(dev)) {
13727 			tg3_netif_stop(tp);
13731 		tg3_full_lock(tp, irq_sync);
13732 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13733 		err = tg3_nvram_lock(tp);
13734 		tg3_halt_cpu(tp, RX_CPU_BASE);
13735 		if (!tg3_flag(tp, 5705_PLUS))
13736 			tg3_halt_cpu(tp, TX_CPU_BASE);
13738 			tg3_nvram_unlock(tp);
13740 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13743 		if (tg3_test_registers(tp) != 0) {
13744 			etest->flags |= ETH_TEST_FL_FAILED;
13745 			data[TG3_REGISTER_TEST] = 1;
13748 		if (tg3_test_memory(tp) != 0) {
13749 			etest->flags |= ETH_TEST_FL_FAILED;
13750 			data[TG3_MEMORY_TEST] = 1;
13754 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13756 		if (tg3_test_loopback(tp, data, doextlpbk))
13757 			etest->flags |= ETH_TEST_FL_FAILED;
		/* Interrupt test needs the lock dropped (it waits for
		 * an IRQ to be delivered).
		 */
13759 		tg3_full_unlock(tp);
13761 		if (tg3_test_interrupt(tp) != 0) {
13762 			etest->flags |= ETH_TEST_FL_FAILED;
13763 			data[TG3_INTERRUPT_TEST] = 1;
13766 		tg3_full_lock(tp, 0);
		/* Bring the device back to its pre-test state. */
13768 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13769 		if (netif_running(dev)) {
13770 			tg3_flag_set(tp, INIT_COMPLETE);
13771 			err2 = tg3_restart_hw(tp, true);
13773 				tg3_netif_start(tp);
13776 		tg3_full_unlock(tp);
13778 		if (irq_sync && !err2)
	/* If we powered up just for the test, go back to low power. */
13781 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13782 		tg3_power_down_prepare(tp);
/* SIOCSHWTSTAMP handler: configure hardware TX/RX timestamping from a
 * user-supplied struct hwtstamp_config.  Maps each supported
 * HWTSTAMP_FILTER_* to the corresponding TG3_RX_PTP_CTL_* bits in
 * tp->rxptpctl, programs the register if the device is up, and echoes
 * the accepted config back to userspace.
 */
13786 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13788 	struct tg3 *tp = netdev_priv(dev);
13789 	struct hwtstamp_config stmpconf;
13791 	if (!tg3_flag(tp, PTP_CAPABLE))
13792 		return -EOPNOTSUPP;
13794 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
	/* Only full-on or full-off TX timestamping is supported. */
13797 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13798 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
	/* Translate the requested RX filter into rxptpctl bits. */
13801 	switch (stmpconf.rx_filter) {
13802 	case HWTSTAMP_FILTER_NONE:
13805 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13806 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13807 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13809 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13810 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13811 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13813 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13814 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815 			       TG3_RX_PTP_CTL_DELAY_REQ;
13817 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13818 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13819 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13821 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13822 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13823 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13825 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13826 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13827 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13829 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13830 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13831 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13833 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13834 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13835 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13837 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13838 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13839 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13841 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13842 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13843 			       TG3_RX_PTP_CTL_DELAY_REQ;
13845 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13846 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13847 			       TG3_RX_PTP_CTL_DELAY_REQ;
13849 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13850 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13851 			       TG3_RX_PTP_CTL_DELAY_REQ;
	/* Apply to hardware only while the interface is running. */
13857 	if (netif_running(dev) && tp->rxptpctl)
13858 		tw32(TG3_RX_PTP_CTL,
13859 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13861 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13862 		tg3_flag_set(tp, TX_TSTAMP_EN);
13864 		tg3_flag_clear(tp, TX_TSTAMP_EN);
	/* Report the config actually applied back to the caller. */
13866 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* SIOCGHWTSTAMP handler: report the current hardware timestamping
 * configuration.  The inverse of tg3_hwtstamp_set() — maps the stored
 * tp->rxptpctl bits back to the matching HWTSTAMP_FILTER_* value.
 */
13870 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13872 	struct tg3 *tp = netdev_priv(dev);
13873 	struct hwtstamp_config stmpconf;
13875 	if (!tg3_flag(tp, PTP_CAPABLE))
13876 		return -EOPNOTSUPP;
13878 	stmpconf.flags = 0;
13879 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13880 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
	/* Exact reverse mapping of the set-side switch. */
13882 	switch (tp->rxptpctl) {
13884 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13886 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13887 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13889 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13890 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13892 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13893 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13895 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13896 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13898 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13899 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13901 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13902 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13904 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13905 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13907 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13908 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13910 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13911 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13913 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13914 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13916 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13917 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13919 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13920 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13927 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* .ndo_eth_ioctl handler.  Delegates MII ioctls to phylib when the
 * device is phylib-managed; otherwise services SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG directly via __tg3_readphy/__tg3_writephy
 * under tp->lock, and routes SIOC[SG]HWTSTAMP to the hwtstamp helpers.
 */
13931 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13933 	struct mii_ioctl_data *data = if_mii(ifr);
13934 	struct tg3 *tp = netdev_priv(dev);
	/* phylib-managed PHY: hand the whole ioctl to phylib. */
13937 	if (tg3_flag(tp, USE_PHYLIB)) {
13938 		struct phy_device *phydev;
13939 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13941 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13942 		return phy_mii_ioctl(phydev, ifr, cmd);
	/* SIOCGMIIPHY: report our PHY address. */
13947 		data->phy_id = tp->phy_addr;
13950 	case SIOCGMIIREG: {
13953 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13954 			break;			/* We have no PHY */
13956 		if (!netif_running(dev))
		/* Serialize MDIO access with the rest of the driver. */
13959 		spin_lock_bh(&tp->lock);
13960 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13961 				    data->reg_num & 0x1f, &mii_regval);
13962 		spin_unlock_bh(&tp->lock);
13964 		data->val_out = mii_regval;
	/* SIOCSMIIREG: write a PHY register. */
13970 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13971 			break;			/* We have no PHY */
13973 		if (!netif_running(dev))
13976 		spin_lock_bh(&tp->lock);
13977 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13978 				     data->reg_num & 0x1f, data->val_in);
13979 		spin_unlock_bh(&tp->lock);
13983 	case SIOCSHWTSTAMP:
13984 		return tg3_hwtstamp_set(dev, ifr);
13986 	case SIOCGHWTSTAMP:
13987 		return tg3_hwtstamp_get(dev, ifr);
	/* Anything else is unsupported. */
13993 	return -EOPNOTSUPP;
/* ethtool .get_coalesce: report the driver's cached interrupt
 * coalescing parameters (tp->coal) to userspace.
 */
13996 static int tg3_get_coalesce(struct net_device *dev,
13997 			    struct ethtool_coalesce *ec,
13998 			    struct kernel_ethtool_coalesce *kernel_coal,
13999 			    struct netlink_ext_ack *extack)
14001 	struct tg3 *tp = netdev_priv(dev);
14003 	memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate the requested coalescing parameters
 * against per-chip limits, cache the supported subset in tp->coal, and
 * program the hardware if the interface is up.
 */
14007 static int tg3_set_coalesce(struct net_device *dev,
14008 			    struct ethtool_coalesce *ec,
14009 			    struct kernel_ethtool_coalesce *kernel_coal,
14010 			    struct netlink_ext_ack *extack)
14012 	struct tg3 *tp = netdev_priv(dev);
14013 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14014 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
	/* irq-tick and stats-tick limits apply only to pre-5705 parts;
	 * on 5705+ the limits stay 0 so any non-zero request fails.
	 */
14016 	if (!tg3_flag(tp, 5705_PLUS)) {
14017 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14018 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14019 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14020 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	/* Reject any out-of-range parameter; zero usecs is invalid. */
14023 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14024 	    (!ec->rx_coalesce_usecs) ||
14025 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14026 	    (!ec->tx_coalesce_usecs) ||
14027 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14028 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14029 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14030 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14031 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14032 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14033 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14034 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14037 	/* Only copy relevant parameters, ignore all others. */
14038 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14039 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14040 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14041 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14042 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14043 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14044 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14045 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14046 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
	/* Push the new values to hardware under the full lock. */
14048 	if (netif_running(dev)) {
14049 		tg3_full_lock(tp, 0);
14050 		__tg3_set_coalesce(tp, &tp->coal);
14051 		tg3_full_unlock(tp);
/* ethtool .set_eee: apply Energy-Efficient Ethernet settings.
 * Rejects boards without EEE capability, any attempt to change the
 * advertisement mask directly, and LPI timers above the hardware
 * maximum; otherwise marks the config user-driven and reprograms EEE
 * under the full lock if the interface is running.
 */
14056 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14058 	struct tg3 *tp = netdev_priv(dev);
14060 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14061 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14062 		return -EOPNOTSUPP;
	/* Advertisement is derived from other settings, not settable. */
14065 	if (edata->advertised != tp->eee.advertised) {
14066 		netdev_warn(tp->dev,
14067 			    "Direct manipulation of EEE advertisement is not supported\n");
14071 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14072 		netdev_warn(tp->dev,
14073 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14074 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
	/* From here on the user owns the EEE config; a link flap may
	 * follow, warn management firmware.
	 */
14080 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14081 	tg3_warn_mgmt_link_flap(tp);
14083 	if (netif_running(tp->dev)) {
14084 		tg3_full_lock(tp, 0);
14087 		tg3_full_unlock(tp);
/* ethtool .get_eee: report EEE settings; -EOPNOTSUPP on boards
 * without EEE capability.
 */
14093 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14095 	struct tg3 *tp = netdev_priv(dev);
14097 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14098 		netdev_warn(tp->dev,
14099 			    "Board does not support EEE!\n");
14100 		return -EOPNOTSUPP;
/* ethtool operations table.  supported_coalesce_params advertises
 * exactly the fields tg3_set_coalesce() honours.
 */
14107 static const struct ethtool_ops tg3_ethtool_ops = {
14108 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14109 				     ETHTOOL_COALESCE_MAX_FRAMES |
14110 				     ETHTOOL_COALESCE_USECS_IRQ |
14111 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14112 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14113 	.get_drvinfo		= tg3_get_drvinfo,
14114 	.get_regs_len		= tg3_get_regs_len,
14115 	.get_regs		= tg3_get_regs,
14116 	.get_wol		= tg3_get_wol,
14117 	.set_wol		= tg3_set_wol,
14118 	.get_msglevel		= tg3_get_msglevel,
14119 	.set_msglevel		= tg3_set_msglevel,
14120 	.nway_reset		= tg3_nway_reset,
14121 	.get_link		= ethtool_op_get_link,
14122 	.get_eeprom_len		= tg3_get_eeprom_len,
14123 	.get_eeprom		= tg3_get_eeprom,
14124 	.set_eeprom		= tg3_set_eeprom,
14125 	.get_ringparam		= tg3_get_ringparam,
14126 	.set_ringparam		= tg3_set_ringparam,
14127 	.get_pauseparam		= tg3_get_pauseparam,
14128 	.set_pauseparam		= tg3_set_pauseparam,
14129 	.self_test		= tg3_self_test,
14130 	.get_strings		= tg3_get_strings,
14131 	.set_phys_id		= tg3_set_phys_id,
14132 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14133 	.get_coalesce		= tg3_get_coalesce,
14134 	.set_coalesce		= tg3_set_coalesce,
14135 	.get_sset_count		= tg3_get_sset_count,
14136 	.get_rxnfc		= tg3_get_rxnfc,
14137 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14138 	.get_rxfh		= tg3_get_rxfh,
14139 	.set_rxfh		= tg3_set_rxfh,
14140 	.get_channels		= tg3_get_channels,
14141 	.set_channels		= tg3_set_channels,
14142 	.get_ts_info		= tg3_get_ts_info,
14143 	.get_eee		= tg3_get_eee,
14144 	.set_eee		= tg3_set_eee,
14145 	.get_link_ksettings	= tg3_get_link_ksettings,
14146 	.set_link_ksettings	= tg3_set_link_ksettings,
/* .ndo_get_stats64: fill *stats from live HW counters under tp->lock;
 * if the device has no stats block yet (or init is incomplete), return
 * the last snapshot taken before the device went down.
 */
14149 static void tg3_get_stats64(struct net_device *dev,
14150 			    struct rtnl_link_stats64 *stats)
14152 	struct tg3 *tp = netdev_priv(dev);
14154 	spin_lock_bh(&tp->lock);
14155 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14156 		*stats = tp->net_stats_prev;
14157 		spin_unlock_bh(&tp->lock);
14161 	tg3_get_nstats(tp, stats);
14162 	spin_unlock_bh(&tp->lock);
/* .ndo_set_rx_mode: reprogram RX filtering (promisc/multicast) under
 * the full lock; no-op while the interface is down.
 */
14165 static void tg3_set_rx_mode(struct net_device *dev)
14167 	struct tg3 *tp = netdev_priv(dev);
14169 	if (!netif_running(dev))
14172 	tg3_full_lock(tp, 0);
14173 	__tg3_set_rx_mode(dev);
14174 	tg3_full_unlock(tp);
/* Record a new MTU and toggle the jumbo-frame related flags.
 * On 5780-class chips TSO and jumbo frames are mutually exclusive, so
 * TSO_CAPABLE is cleared when going jumbo and restored when not; the
 * netdev_update_features() ordering differs accordingly.
 */
14177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14180 	dev->mtu = new_mtu;
14182 	if (new_mtu > ETH_DATA_LEN) {
14183 		if (tg3_flag(tp, 5780_CLASS)) {
14184 			netdev_update_features(dev);
14185 			tg3_flag_clear(tp, TSO_CAPABLE);
14187 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14190 		if (tg3_flag(tp, 5780_CLASS)) {
14191 			tg3_flag_set(tp, TSO_CAPABLE);
14192 			netdev_update_features(dev);
14194 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* .ndo_change_mtu: if the device is down just record the new MTU;
 * otherwise stop traffic, halt the chip, and restart it with the new
 * size (optionally resetting the PHY on the chips that need it).
 */
14198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14200 	struct tg3 *tp = netdev_priv(dev);
14202 	bool reset_phy = false;
14204 	if (!netif_running(dev)) {
14205 		/* We'll just catch it later when the
		 * device is up.
		 */
14208 		tg3_set_mtu(dev, tp, new_mtu);
14214 	tg3_netif_stop(tp);
14216 	tg3_set_mtu(dev, tp, new_mtu);
14218 	tg3_full_lock(tp, 1);
14220 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14222 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14223 	 * breaks all requests to 256 bytes.
	 */
14225 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14226 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14227 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14228 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14231 	err = tg3_restart_hw(tp, reset_phy);
14234 		tg3_netif_start(tp);
14236 	tg3_full_unlock(tp);
/* net_device operations table for the tg3 driver. */
14244 static const struct net_device_ops tg3_netdev_ops = {
14245 	.ndo_open		= tg3_open,
14246 	.ndo_stop		= tg3_close,
14247 	.ndo_start_xmit		= tg3_start_xmit,
14248 	.ndo_get_stats64	= tg3_get_stats64,
14249 	.ndo_validate_addr	= eth_validate_addr,
14250 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14251 	.ndo_set_mac_address	= tg3_set_mac_addr,
14252 	.ndo_eth_ioctl		= tg3_ioctl,
14253 	.ndo_tx_timeout		= tg3_tx_timeout,
14254 	.ndo_change_mtu		= tg3_change_mtu,
14255 	.ndo_fix_features	= tg3_fix_features,
14256 	.ndo_set_features	= tg3_set_features,
14257 #ifdef CONFIG_NET_POLL_CONTROLLER
14258 	.ndo_poll_controller	= tg3_poll_controller,
/* Determine the size of an EEPROM-style NVRAM part by probing for
 * address wrap-around.  Starts from the default EEPROM_CHIP_SIZE and
 * reads at increasing offsets until the validation signature reappears
 * — at that point addressing has wrapped and the size is known.
 */
14262 static void tg3_get_eeprom_size(struct tg3 *tp)
14264 	u32 cursize, val, magic;
14266 	tp->nvram_size = EEPROM_CHIP_SIZE;
14268 	if (tg3_nvram_read(tp, 0, &magic) != 0)
	/* Only proceed if word 0 carries a recognized signature. */
14271 	if ((magic != TG3_EEPROM_MAGIC) &&
14272 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14273 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14277 	 * Size the chip by reading offsets at increasing powers of two.
14278 	 * When we encounter our validation signature, we know the addressing
14279 	 * has wrapped around, and thus have our chip size.
	 */
14283 	while (cursize < tp->nvram_size) {
14284 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14293 	tp->nvram_size = cursize;
/* Determine total NVRAM size.  Self-boot images (non-standard magic)
 * are sized by tg3_get_eeprom_size(); standard images store their size
 * in KB as a 16-bit LE value at offset 0xf2, read here via the word at
 * 0xf0.  Falls back to 512KB when that word reads as zero/unavailable.
 */
14296 static void tg3_get_nvram_size(struct tg3 *tp)
14300 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14303 	/* Selfboot format */
14304 	if (val != TG3_EEPROM_MAGIC) {
14305 		tg3_get_eeprom_size(tp);
14309 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14311 			/* This is confusing. We want to operate on the
14312 			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14313 			 * call will read from NVRAM and byteswap the data
14314 			 * according to the byteswapping settings for all
14315 			 * other register accesses. This ensures the data we
14316 			 * want will always reside in the lower 16-bits.
14317 			 * However, the data in NVRAM is in LE format, which
14318 			 * means the data from the NVRAM read will always be
14319 			 * opposite the endianness of the CPU. The 16-bit
14320 			 * byteswap then brings the data to CPU endianness.
			 */
14322 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
	/* No size recorded: assume the largest supported part. */
14326 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Probe NVRAM type for the original (pre-5752) chip families.
 * Reads NVRAM_CFG1 to distinguish flash from EEPROM, then (on 5750 /
 * 5780-class) decodes the vendor field into JEDEC id, page size and
 * buffered-ness.  Defaults to a buffered Atmel AT45DB0X1B otherwise.
 */
14329 static void tg3_get_nvram_info(struct tg3 *tp)
14333 	nvcfg1 = tr32(NVRAM_CFG1);
14334 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14335 		tg3_flag_set(tp, FLASH);
	/* Not flash: disable compatibility bypass for EEPROM access. */
14337 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14338 		tw32(NVRAM_CFG1, nvcfg1);
14341 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14342 	    tg3_flag(tp, 5780_CLASS)) {
14343 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14344 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14345 			tp->nvram_jedecnum = JEDEC_ATMEL;
14346 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14347 			tg3_flag_set(tp, NVRAM_BUFFERED);
14349 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14350 			tp->nvram_jedecnum = JEDEC_ATMEL;
14351 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14353 		case FLASH_VENDOR_ATMEL_EEPROM:
14354 			tp->nvram_jedecnum = JEDEC_ATMEL;
14355 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14356 			tg3_flag_set(tp, NVRAM_BUFFERED);
14358 		case FLASH_VENDOR_ST:
14359 			tp->nvram_jedecnum = JEDEC_ST;
14360 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14361 			tg3_flag_set(tp, NVRAM_BUFFERED);
14363 		case FLASH_VENDOR_SAIFUN:
14364 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14365 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14367 		case FLASH_VENDOR_SST_SMALL:
14368 		case FLASH_VENDOR_SST_LARGE:
14369 			tp->nvram_jedecnum = JEDEC_SST;
14370 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
	/* Unknown/older ASIC: assume buffered Atmel part. */
14374 		tp->nvram_jedecnum = JEDEC_ATMEL;
14375 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14376 		tg3_flag_set(tp, NVRAM_BUFFERED);
/* Decode the 5752-style page-size field of NVRAM_CFG1 into bytes and
 * store it in tp->nvram_pagesize.  264/528 are the Atmel DataFlash
 * "power-of-two-plus-extra" page sizes.
 */
14380 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14382 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14383 	case FLASH_5752PAGE_SIZE_256:
14384 		tp->nvram_pagesize = 256;
14386 	case FLASH_5752PAGE_SIZE_512:
14387 		tp->nvram_pagesize = 512;
14389 	case FLASH_5752PAGE_SIZE_1K:
14390 		tp->nvram_pagesize = 1024;
14392 	case FLASH_5752PAGE_SIZE_2K:
14393 		tp->nvram_pagesize = 2048;
14395 	case FLASH_5752PAGE_SIZE_4K:
14396 		tp->nvram_pagesize = 4096;
14398 	case FLASH_5752PAGE_SIZE_264:
14399 		tp->nvram_pagesize = 264;
14401 	case FLASH_5752PAGE_SIZE_528:
14402 		tp->nvram_pagesize = 528;
/* Probe NVRAM type for 5752 chips: flag TPM-protected NVRAM (cfg bit
 * 27), decode the vendor field into JEDEC id / buffered / flash flags,
 * then derive the page size (from config for flash, chip size for
 * EEPROM).
 */
14407 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14411 	nvcfg1 = tr32(NVRAM_CFG1);
14413 	/* NVRAM protection for TPM */
14414 	if (nvcfg1 & (1 << 27))
14415 		tg3_flag_set(tp, PROTECTED_NVRAM);
14417 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14418 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14419 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14420 		tp->nvram_jedecnum = JEDEC_ATMEL;
14421 		tg3_flag_set(tp, NVRAM_BUFFERED);
14423 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14424 		tp->nvram_jedecnum = JEDEC_ATMEL;
14425 		tg3_flag_set(tp, NVRAM_BUFFERED);
14426 		tg3_flag_set(tp, FLASH);
14428 	case FLASH_5752VENDOR_ST_M45PE10:
14429 	case FLASH_5752VENDOR_ST_M45PE20:
14430 	case FLASH_5752VENDOR_ST_M45PE40:
14431 		tp->nvram_jedecnum = JEDEC_ST;
14432 		tg3_flag_set(tp, NVRAM_BUFFERED);
14433 		tg3_flag_set(tp, FLASH);
14437 	if (tg3_flag(tp, FLASH)) {
14438 		tg3_nvram_get_pagesize(tp, nvcfg1);
14440 		/* For eeprom, set pagesize to maximum eeprom size */
14441 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		/* EEPROM path also clears compatibility bypass. */
14443 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14444 		tw32(NVRAM_CFG1, nvcfg1);
/* Probe NVRAM type for 5755 chips.  Like the 5752 variant, but also
 * derives the usable NVRAM size from the specific part id — and
 * reduces it when the TPM write-protection bit is set.
 */
14448 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14450 	u32 nvcfg1, protect = 0;
14452 	nvcfg1 = tr32(NVRAM_CFG1);
14454 	/* NVRAM protection for TPM */
14455 	if (nvcfg1 & (1 << 27)) {
14456 		tg3_flag_set(tp, PROTECTED_NVRAM);
14460 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14462 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14463 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14464 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14465 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14466 		tp->nvram_jedecnum = JEDEC_ATMEL;
14467 		tg3_flag_set(tp, NVRAM_BUFFERED);
14468 		tg3_flag_set(tp, FLASH);
14469 		tp->nvram_pagesize = 264;
		/* Size by part; protected parts expose less space. */
14470 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14471 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14472 			tp->nvram_size = (protect ? 0x3e200 :
14473 					  TG3_NVRAM_SIZE_512KB);
14474 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14475 			tp->nvram_size = (protect ? 0x1f200 :
14476 					  TG3_NVRAM_SIZE_256KB);
14478 			tp->nvram_size = (protect ? 0x1f200 :
14479 					  TG3_NVRAM_SIZE_128KB);
14481 	case FLASH_5752VENDOR_ST_M45PE10:
14482 	case FLASH_5752VENDOR_ST_M45PE20:
14483 	case FLASH_5752VENDOR_ST_M45PE40:
14484 		tp->nvram_jedecnum = JEDEC_ST;
14485 		tg3_flag_set(tp, NVRAM_BUFFERED);
14486 		tg3_flag_set(tp, FLASH);
14487 		tp->nvram_pagesize = 256;
14488 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14489 			tp->nvram_size = (protect ?
14490 					  TG3_NVRAM_SIZE_64KB :
14491 					  TG3_NVRAM_SIZE_128KB);
14492 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14493 			tp->nvram_size = (protect ?
14494 					  TG3_NVRAM_SIZE_64KB :
14495 					  TG3_NVRAM_SIZE_256KB);
14497 			tp->nvram_size = (protect ?
14498 					  TG3_NVRAM_SIZE_128KB :
14499 					  TG3_NVRAM_SIZE_512KB);
/* Probe NVRAM characteristics on 5787/5784/5785-class chips: decode
 * NVRAM_CFG1 into vendor, flash-vs-EEPROM, and page size.  No size
 * derivation here; size is detected elsewhere.
 * NOTE(review): extract is missing interleaved break/brace lines.
 */
14504 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14508 nvcfg1 = tr32(NVRAM_CFG1);
14510 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14511 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14512 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14513 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14514 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14515 tp->nvram_jedecnum = JEDEC_ATMEL;
14516 tg3_flag_set(tp, NVRAM_BUFFERED);
/* EEPROM: one "page" covering the whole chip; clear compat bypass. */
14517 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14519 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14520 tw32(NVRAM_CFG1, nvcfg1);
14522 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14523 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14524 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14525 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14526 tp->nvram_jedecnum = JEDEC_ATMEL;
14527 tg3_flag_set(tp, NVRAM_BUFFERED);
14528 tg3_flag_set(tp, FLASH);
14529 tp->nvram_pagesize = 264;
14531 case FLASH_5752VENDOR_ST_M45PE10:
14532 case FLASH_5752VENDOR_ST_M45PE20:
14533 case FLASH_5752VENDOR_ST_M45PE40:
14534 tp->nvram_jedecnum = JEDEC_ST;
14535 tg3_flag_set(tp, NVRAM_BUFFERED);
14536 tg3_flag_set(tp, FLASH);
14537 tp->nvram_pagesize = 256;
/* Probe NVRAM characteristics on 5761-class chips.  All supported parts are
 * 256-byte-page flashes; the total size comes from the NVRAM_ADDR_LOCKOUT
 * register when set, else from a second switch keyed on the same strap.
 * NOTE(review): extract is missing interleaved break/brace lines and the
 * condition guarding the second size switch.
 */
14542 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14544 u32 nvcfg1, protect = 0;
14546 nvcfg1 = tr32(NVRAM_CFG1);
14548 /* NVRAM protection for TPM */
14549 if (nvcfg1 & (1 << 27)) {
14550 tg3_flag_set(tp, PROTECTED_NVRAM);
14554 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14556 case FLASH_5761VENDOR_ATMEL_ADB021D:
14557 case FLASH_5761VENDOR_ATMEL_ADB041D:
14558 case FLASH_5761VENDOR_ATMEL_ADB081D:
14559 case FLASH_5761VENDOR_ATMEL_ADB161D:
14560 case FLASH_5761VENDOR_ATMEL_MDB021D:
14561 case FLASH_5761VENDOR_ATMEL_MDB041D:
14562 case FLASH_5761VENDOR_ATMEL_MDB081D:
14563 case FLASH_5761VENDOR_ATMEL_MDB161D:
14564 tp->nvram_jedecnum = JEDEC_ATMEL;
14565 tg3_flag_set(tp, NVRAM_BUFFERED);
14566 tg3_flag_set(tp, FLASH);
/* Atmel parts here use linear addressing (no 264-byte page translation). */
14567 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14568 tp->nvram_pagesize = 256;
14570 case FLASH_5761VENDOR_ST_A_M45PE20:
14571 case FLASH_5761VENDOR_ST_A_M45PE40:
14572 case FLASH_5761VENDOR_ST_A_M45PE80:
14573 case FLASH_5761VENDOR_ST_A_M45PE16:
14574 case FLASH_5761VENDOR_ST_M_M45PE20:
14575 case FLASH_5761VENDOR_ST_M_M45PE40:
14576 case FLASH_5761VENDOR_ST_M_M45PE80:
14577 case FLASH_5761VENDOR_ST_M_M45PE16:
14578 tp->nvram_jedecnum = JEDEC_ST;
14579 tg3_flag_set(tp, NVRAM_BUFFERED);
14580 tg3_flag_set(tp, FLASH);
14581 tp->nvram_pagesize = 256;
/* Preferred size source: the address lockout register. */
14586 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
/* Fallback: map the strap value to a nominal part size. */
14589 case FLASH_5761VENDOR_ATMEL_ADB161D:
14590 case FLASH_5761VENDOR_ATMEL_MDB161D:
14591 case FLASH_5761VENDOR_ST_A_M45PE16:
14592 case FLASH_5761VENDOR_ST_M_M45PE16:
14593 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14595 case FLASH_5761VENDOR_ATMEL_ADB081D:
14596 case FLASH_5761VENDOR_ATMEL_MDB081D:
14597 case FLASH_5761VENDOR_ST_A_M45PE80:
14598 case FLASH_5761VENDOR_ST_M_M45PE80:
14599 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14601 case FLASH_5761VENDOR_ATMEL_ADB041D:
14602 case FLASH_5761VENDOR_ATMEL_MDB041D:
14603 case FLASH_5761VENDOR_ST_A_M45PE40:
14604 case FLASH_5761VENDOR_ST_M_M45PE40:
14605 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14607 case FLASH_5761VENDOR_ATMEL_ADB021D:
14608 case FLASH_5761VENDOR_ATMEL_MDB021D:
14609 case FLASH_5761VENDOR_ST_A_M45PE20:
14610 case FLASH_5761VENDOR_ST_M_M45PE20:
14611 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 has a fixed NVRAM layout: a buffered Atmel serial EEPROM with the
 * page size set to the whole chip.  No strap decoding needed.
 */
14617 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14619 tp->nvram_jedecnum = JEDEC_ATMEL;
14620 tg3_flag_set(tp, NVRAM_BUFFERED);
14621 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* Probe NVRAM characteristics on 57780/57765-class chips.  Decodes vendor
 * and size from NVRAM_CFG1; an unrecognized strap marks the device as
 * having no NVRAM at all.
 * NOTE(review): extract is missing interleaved break/brace/default lines.
 */
14624 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14628 nvcfg1 = tr32(NVRAM_CFG1);
14630 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14631 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14632 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14633 tp->nvram_jedecnum = JEDEC_ATMEL;
14634 tg3_flag_set(tp, NVRAM_BUFFERED);
14635 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14637 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14638 tw32(NVRAM_CFG1, nvcfg1);
14640 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14641 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14642 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14643 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14644 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14645 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14646 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14647 tp->nvram_jedecnum = JEDEC_ATMEL;
14648 tg3_flag_set(tp, NVRAM_BUFFERED);
14649 tg3_flag_set(tp, FLASH);
/* Inner switch grades the Atmel AT45DB part into a nominal size. */
14651 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14652 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14653 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14654 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14655 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14657 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14658 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14659 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14661 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14662 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14663 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14667 case FLASH_5752VENDOR_ST_M45PE10:
14668 case FLASH_5752VENDOR_ST_M45PE20:
14669 case FLASH_5752VENDOR_ST_M45PE40:
14670 tp->nvram_jedecnum = JEDEC_ST;
14671 tg3_flag_set(tp, NVRAM_BUFFERED);
14672 tg3_flag_set(tp, FLASH);
/* Inner switch grades the ST M45PE part into a nominal size. */
14674 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14675 case FLASH_5752VENDOR_ST_M45PE10:
14676 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14678 case FLASH_5752VENDOR_ST_M45PE20:
14679 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14681 case FLASH_5752VENDOR_ST_M45PE40:
14682 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unknown strap: no usable NVRAM. */
14687 tg3_flag_set(tp, NO_NVRAM);
/* Page size from strap; non-264/528 pages use linear addressing. */
14691 tg3_nvram_get_pagesize(tp, nvcfg1);
14692 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14693 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Probe NVRAM characteristics on 5717/5719-class chips.  Same structure as
 * the 57780 probe: vendor switch, size sub-switch, NO_NVRAM on unknown
 * strap, then page-size / address-translation fixup.
 * NOTE(review): extract is missing interleaved break/brace/default lines.
 */
14697 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14701 nvcfg1 = tr32(NVRAM_CFG1);
14703 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704 case FLASH_5717VENDOR_ATMEL_EEPROM:
14705 case FLASH_5717VENDOR_MICRO_EEPROM:
14706 tp->nvram_jedecnum = JEDEC_ATMEL;
14707 tg3_flag_set(tp, NVRAM_BUFFERED);
14708 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14710 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14711 tw32(NVRAM_CFG1, nvcfg1);
14713 case FLASH_5717VENDOR_ATMEL_MDB011D:
14714 case FLASH_5717VENDOR_ATMEL_ADB011B:
14715 case FLASH_5717VENDOR_ATMEL_ADB011D:
14716 case FLASH_5717VENDOR_ATMEL_MDB021D:
14717 case FLASH_5717VENDOR_ATMEL_ADB021B:
14718 case FLASH_5717VENDOR_ATMEL_ADB021D:
14719 case FLASH_5717VENDOR_ATMEL_45USPT:
14720 tp->nvram_jedecnum = JEDEC_ATMEL;
14721 tg3_flag_set(tp, NVRAM_BUFFERED);
14722 tg3_flag_set(tp, FLASH);
14724 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14725 case FLASH_5717VENDOR_ATMEL_MDB021D:
14726 /* Detect size with tg3_nvram_get_size() */
14728 case FLASH_5717VENDOR_ATMEL_ADB021B:
14729 case FLASH_5717VENDOR_ATMEL_ADB021D:
14730 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14733 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14737 case FLASH_5717VENDOR_ST_M_M25PE10:
14738 case FLASH_5717VENDOR_ST_A_M25PE10:
14739 case FLASH_5717VENDOR_ST_M_M45PE10:
14740 case FLASH_5717VENDOR_ST_A_M45PE10:
14741 case FLASH_5717VENDOR_ST_M_M25PE20:
14742 case FLASH_5717VENDOR_ST_A_M25PE20:
14743 case FLASH_5717VENDOR_ST_M_M45PE20:
14744 case FLASH_5717VENDOR_ST_A_M45PE20:
14745 case FLASH_5717VENDOR_ST_25USPT:
14746 case FLASH_5717VENDOR_ST_45USPT:
14747 tp->nvram_jedecnum = JEDEC_ST;
14748 tg3_flag_set(tp, NVRAM_BUFFERED);
14749 tg3_flag_set(tp, FLASH);
14751 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752 case FLASH_5717VENDOR_ST_M_M25PE20:
14753 case FLASH_5717VENDOR_ST_M_M45PE20:
14754 /* Detect size with tg3_nvram_get_size() */
14756 case FLASH_5717VENDOR_ST_A_M25PE20:
14757 case FLASH_5717VENDOR_ST_A_M45PE20:
14758 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14761 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unknown strap: no usable NVRAM. */
14766 tg3_flag_set(tp, NO_NVRAM);
/* Page size from strap; non-264/528 pages use linear addressing. */
14770 tg3_nvram_get_pagesize(tp, nvcfg1);
14771 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14772 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Probe NVRAM characteristics on 5720/5762-class chips.  The 5762 adds a
 * pre-pass: Macronix MX25L parts are handled directly (size auto-sensed
 * from NVRAM_AUTOSENSE_STATUS), and some 5762 pinstraps are remapped onto
 * their 5720 equivalents before the common switch below.
 * NOTE(review): extract is missing interleaved break/brace/default lines.
 */
14775 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14777 u32 nvcfg1, nvmpinstrp, nv_status;
14779 nvcfg1 = tr32(NVRAM_CFG1);
14780 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14782 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
/* 5762 with an empty vendor field has no NVRAM fitted. */
14783 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14784 tg3_flag_set(tp, NO_NVRAM);
14788 switch (nvmpinstrp) {
14789 case FLASH_5762_MX25L_100:
14790 case FLASH_5762_MX25L_200:
14791 case FLASH_5762_MX25L_400:
14792 case FLASH_5762_MX25L_800:
14793 case FLASH_5762_MX25L_160_320:
14794 tp->nvram_pagesize = 4096;
14795 tp->nvram_jedecnum = JEDEC_MACRONIX;
14796 tg3_flag_set(tp, NVRAM_BUFFERED);
14797 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14798 tg3_flag_set(tp, FLASH);
/* Size = 1 MB << (auto-sensed device-id field). */
14799 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14801 (1 << (nv_status >> AUTOSENSE_DEVID &
14802 AUTOSENSE_DEVID_MASK)
14803 << AUTOSENSE_SIZE_IN_MB);
14806 case FLASH_5762_EEPROM_HD:
14807 nvmpinstrp = FLASH_5720_EEPROM_HD;
14809 case FLASH_5762_EEPROM_LD:
14810 nvmpinstrp = FLASH_5720_EEPROM_LD;
14812 case FLASH_5720VENDOR_M_ST_M45PE20:
14813 /* This pinstrap supports multiple sizes, so force it
14814 * to read the actual size from location 0xf0.
14816 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
/* Common 5720-style strap decode. */
14821 switch (nvmpinstrp) {
14822 case FLASH_5720_EEPROM_HD:
14823 case FLASH_5720_EEPROM_LD:
14824 tp->nvram_jedecnum = JEDEC_ATMEL;
14825 tg3_flag_set(tp, NVRAM_BUFFERED);
14827 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14828 tw32(NVRAM_CFG1, nvcfg1);
/* HD (high-density) vs LD EEPROMs differ only in page size. */
14829 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14830 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14832 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14834 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14835 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14836 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14837 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14838 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14839 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14840 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14841 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14842 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14843 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14844 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14845 case FLASH_5720VENDOR_ATMEL_45USPT:
14846 tp->nvram_jedecnum = JEDEC_ATMEL;
14847 tg3_flag_set(tp, NVRAM_BUFFERED);
14848 tg3_flag_set(tp, FLASH);
14850 switch (nvmpinstrp) {
14851 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14852 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14853 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14854 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14856 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14857 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14858 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14859 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14861 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14862 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14863 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
/* 5762 leaves size to later auto-detection instead of defaulting. */
14866 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14867 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14871 case FLASH_5720VENDOR_M_ST_M25PE10:
14872 case FLASH_5720VENDOR_M_ST_M45PE10:
14873 case FLASH_5720VENDOR_A_ST_M25PE10:
14874 case FLASH_5720VENDOR_A_ST_M45PE10:
14875 case FLASH_5720VENDOR_M_ST_M25PE20:
14876 case FLASH_5720VENDOR_M_ST_M45PE20:
14877 case FLASH_5720VENDOR_A_ST_M25PE20:
14878 case FLASH_5720VENDOR_A_ST_M45PE20:
14879 case FLASH_5720VENDOR_M_ST_M25PE40:
14880 case FLASH_5720VENDOR_M_ST_M45PE40:
14881 case FLASH_5720VENDOR_A_ST_M25PE40:
14882 case FLASH_5720VENDOR_A_ST_M45PE40:
14883 case FLASH_5720VENDOR_M_ST_M25PE80:
14884 case FLASH_5720VENDOR_M_ST_M45PE80:
14885 case FLASH_5720VENDOR_A_ST_M25PE80:
14886 case FLASH_5720VENDOR_A_ST_M45PE80:
14887 case FLASH_5720VENDOR_ST_25USPT:
14888 case FLASH_5720VENDOR_ST_45USPT:
14889 tp->nvram_jedecnum = JEDEC_ST;
14890 tg3_flag_set(tp, NVRAM_BUFFERED);
14891 tg3_flag_set(tp, FLASH);
14893 switch (nvmpinstrp) {
14894 case FLASH_5720VENDOR_M_ST_M25PE20:
14895 case FLASH_5720VENDOR_M_ST_M45PE20:
14896 case FLASH_5720VENDOR_A_ST_M25PE20:
14897 case FLASH_5720VENDOR_A_ST_M45PE20:
14898 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14900 case FLASH_5720VENDOR_M_ST_M25PE40:
14901 case FLASH_5720VENDOR_M_ST_M45PE40:
14902 case FLASH_5720VENDOR_A_ST_M25PE40:
14903 case FLASH_5720VENDOR_A_ST_M45PE40:
14904 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14906 case FLASH_5720VENDOR_M_ST_M25PE80:
14907 case FLASH_5720VENDOR_M_ST_M45PE80:
14908 case FLASH_5720VENDOR_A_ST_M25PE80:
14909 case FLASH_5720VENDOR_A_ST_M45PE80:
14910 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14913 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14914 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unknown strap: no usable NVRAM. */
14919 tg3_flag_set(tp, NO_NVRAM);
14923 tg3_nvram_get_pagesize(tp, nvcfg1);
14924 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14925 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5762 sanity check: word 0 must carry a recognized EEPROM magic,
 * otherwise treat the device as having no NVRAM.
 */
14927 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14930 if (tg3_nvram_read(tp, 0, &val))
14933 if (val != TG3_EEPROM_MAGIC &&
14934 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14935 tg3_flag_set(tp, NO_NVRAM);
14939 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Top-level NVRAM initialization: reset the serial-EEPROM state machine,
 * enable seeprom access, then dispatch to the per-ASIC probe routine to
 * fill in jedecnum/pagesize/size and the NVRAM-related flags.  SSB cores
 * and 5700/5701 have no NVRAM interface and fall back to raw EEPROM
 * handling.
 * NOTE(review): extract is missing interleaved lines (braces, returns,
 * udelays) — confirm control flow against the full source.
 */
14940 static void tg3_nvram_init(struct tg3 *tp)
14942 if (tg3_flag(tp, IS_SSB_CORE)) {
14943 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14944 tg3_flag_clear(tp, NVRAM);
14945 tg3_flag_clear(tp, NVRAM_BUFFERED);
14946 tg3_flag_set(tp, NO_NVRAM);
/* Reset the EEPROM FSM and program the default clock period. */
14950 tw32_f(GRC_EEPROM_ADDR,
14951 (EEPROM_ADDR_FSM_RESET |
14952 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14953 EEPROM_ADDR_CLKPERD_SHIFT)));
14957 /* Enable seeprom accesses. */
14958 tw32_f(GRC_LOCAL_CTRL,
14959 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14962 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14963 tg3_asic_rev(tp) != ASIC_REV_5701) {
14964 tg3_flag_set(tp, NVRAM);
/* Must hold the NVRAM lock across the probe. */
14966 if (tg3_nvram_lock(tp)) {
14967 netdev_warn(tp->dev,
14968 "Cannot get nvram lock, %s failed\n",
14972 tg3_enable_nvram_access(tp);
14974 tp->nvram_size = 0;
/* Dispatch on ASIC revision to the matching probe helper. */
14976 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14977 tg3_get_5752_nvram_info(tp);
14978 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14979 tg3_get_5755_nvram_info(tp);
14980 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14981 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14982 tg3_asic_rev(tp) == ASIC_REV_5785)
14983 tg3_get_5787_nvram_info(tp);
14984 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14985 tg3_get_5761_nvram_info(tp);
14986 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14987 tg3_get_5906_nvram_info(tp);
14988 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14989 tg3_flag(tp, 57765_CLASS))
14990 tg3_get_57780_nvram_info(tp);
14991 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14992 tg3_asic_rev(tp) == ASIC_REV_5719)
14993 tg3_get_5717_nvram_info(tp);
14994 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14995 tg3_asic_rev(tp) == ASIC_REV_5762)
14996 tg3_get_5720_nvram_info(tp);
14998 tg3_get_nvram_info(tp);
/* Probe helpers may leave size 0 to request generic detection. */
15000 if (tp->nvram_size == 0)
15001 tg3_get_nvram_size(tp);
15003 tg3_disable_nvram_access(tp);
15004 tg3_nvram_unlock(tp);
/* 5700/5701 path: no NVRAM interface, use plain EEPROM sizing. */
15007 tg3_flag_clear(tp, NVRAM);
15008 tg3_flag_clear(tp, NVRAM_BUFFERED);
15010 tg3_get_eeprom_size(tp);
/* Maps a PCI subsystem vendor/device pair to a hard-coded PHY id, used when
 * the board's EEPROM carries no PHY information.
 * NOTE(review): the phy_id member and closing brace are missing from this
 * extract — the initializers below supply three values per entry.
 */
15014 struct subsys_tbl_ent {
15015 u16 subsys_vendor, subsys_devid;
15019 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15020 /* Broadcom boards. */
15021 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15022 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15023 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15024 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15025 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15026 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15027 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15028 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15030 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15032 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15034 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15035 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15036 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15038 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15040 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15042 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15045 { TG3PCI_SUBVENDOR_ID_3COM,
15046 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15047 { TG3PCI_SUBVENDOR_ID_3COM,
15048 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15049 { TG3PCI_SUBVENDOR_ID_3COM,
15050 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15051 { TG3PCI_SUBVENDOR_ID_3COM,
15052 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15053 { TG3PCI_SUBVENDOR_ID_3COM,
15054 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15057 { TG3PCI_SUBVENDOR_ID_DELL,
15058 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15059 { TG3PCI_SUBVENDOR_ID_DELL,
15060 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15061 { TG3PCI_SUBVENDOR_ID_DELL,
15062 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15063 { TG3PCI_SUBVENDOR_ID_DELL,
15064 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15066 /* Compaq boards. */
15067 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15068 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15069 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15070 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15071 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15072 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15073 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15074 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15075 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15076 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15079 { TG3PCI_SUBVENDOR_ID_IBM,
15080 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for the device's PCI subsystem
 * vendor/device pair.  Returns the matching entry; the no-match return
 * (presumably NULL — line missing from this extract) is handled by the
 * caller in tg3_phy_probe().
 */
15083 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15087 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15088 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15089 tp->pdev->subsystem_vendor) &&
15090 (subsys_id_to_phy_id[i].subsys_devid ==
15091 tp->pdev->subsystem_device))
15092 return &subsys_id_to_phy_id[i];
/* Read hardware configuration stashed in NIC SRAM by the bootcode (phy id,
 * LED mode, WOL/ASF/APE enables, serdes/RGMII straps) and translate it into
 * tp->phy_id, tp->led_ctrl, tp->phy_flags and tg3 flags.  Falls back to
 * conservative defaults when the SRAM signature is absent.
 * NOTE(review): extract is missing interleaved lines (braces, breaks,
 * else branches) — confirm control flow against the full source.
 */
15097 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15101 tp->phy_id = TG3_PHY_ID_INVALID;
15102 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15104 /* Assume an onboard device and WOL capable by default. */
15105 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15106 tg3_flag_set(tp, WOL_CAP);
/* 5906 keeps its config in VCPU shadow registers, not NIC SRAM. */
15108 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15109 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15110 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15111 tg3_flag_set(tp, IS_NIC);
15113 val = tr32(VCPU_CFGSHDW);
15114 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15115 tg3_flag_set(tp, ASPM_WORKAROUND);
15116 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15117 (val & VCPU_CFGSHDW_WOL_MAGPKT) {
15118 tg3_flag_set(tp, WOL_ENABLE);
15119 device_set_wakeup_enable(&tp->pdev->dev, true);
/* Valid bootcode data is marked by a magic signature word. */
15124 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15125 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15126 u32 nic_cfg, led_cfg;
15127 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15128 u32 nic_phy_id, ver, eeprom_phy_id;
15129 int eeprom_phy_serdes = 0;
15131 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15132 tp->nic_sram_data_cfg = nic_cfg;
/* Extended config words exist only for certain revisions/versions. */
15134 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15135 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15136 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15137 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15138 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15139 (ver > 0) && (ver < 0x100))
15140 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15142 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15143 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15145 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15146 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15147 tg3_asic_rev(tp) == ASIC_REV_5720)
15148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15150 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15151 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15152 eeprom_phy_serdes = 1;
/* Reassemble the PHY id from the two packed SRAM halves. */
15154 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15155 if (nic_phy_id != 0) {
15156 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15157 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15159 eeprom_phy_id = (id1 >> 16) << 10;
15160 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15161 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15165 tp->phy_id = eeprom_phy_id;
15166 if (eeprom_phy_serdes) {
15167 if (!tg3_flag(tp, 5705_PLUS))
15168 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15170 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
/* LED mode location differs: cfg2 on 5750+, nic_cfg otherwise. */
15173 if (tg3_flag(tp, 5750_PLUS))
15174 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15175 SHASTA_EXT_LED_MODE_MASK);
15177 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15181 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15182 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15185 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15186 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15189 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15190 tp->led_ctrl = LED_CTRL_MODE_MAC;
15192 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15193 * read on some older 5700/5701 bootcode.
15195 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15196 tg3_asic_rev(tp) == ASIC_REV_5701)
15197 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15201 case SHASTA_EXT_LED_SHARED:
15202 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15203 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15204 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15205 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15206 LED_CTRL_MODE_PHY_2);
15208 if (tg3_flag(tp, 5717_PLUS) ||
15209 tg3_asic_rev(tp) == ASIC_REV_5762)
15210 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15211 LED_CTRL_BLINK_RATE_MASK;
15215 case SHASTA_EXT_LED_MAC:
15216 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15219 case SHASTA_EXT_LED_COMBO:
15220 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15221 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15222 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15223 LED_CTRL_MODE_PHY_2);
/* Board-specific LED overrides. */
15228 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15229 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15230 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15231 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15233 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15234 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15236 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15237 tg3_flag_set(tp, EEPROM_WRITE_PROT);
/* Specific Arima boards misreport write-protect; clear it. */
15238 if ((tp->pdev->subsystem_vendor ==
15239 PCI_VENDOR_ID_ARIMA) &&
15240 (tp->pdev->subsystem_device == 0x205a ||
15241 tp->pdev->subsystem_device == 0x2063))
15242 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15244 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15245 tg3_flag_set(tp, IS_NIC);
15248 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15249 tg3_flag_set(tp, ENABLE_ASF);
15250 if (tg3_flag(tp, 5750_PLUS))
15251 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15254 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15255 tg3_flag(tp, 5750_PLUS))
15256 tg3_flag_set(tp, ENABLE_APE);
/* Serdes boards lose WOL unless the fiber-WOL strap is set. */
15258 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15259 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15260 tg3_flag_clear(tp, WOL_CAP);
15262 if (tg3_flag(tp, WOL_CAP) &&
15263 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15264 tg3_flag_set(tp, WOL_ENABLE);
15265 device_set_wakeup_enable(&tp->pdev->dev, true);
15268 if (cfg2 & (1 << 17))
15269 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15271 /* serdes signal pre-emphasis in register 0x590 set by */
15272 /* bootcode if bit 18 is set */
15273 if (cfg2 & (1 << 18))
15274 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15276 if ((tg3_flag(tp, 57765_PLUS) ||
15277 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15278 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15279 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15280 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
/* PCIe-only straps live in cfg3. */
15282 if (tg3_flag(tp, PCI_EXPRESS)) {
15285 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15286 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15287 !tg3_flag(tp, 57765_PLUS) &&
15288 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15289 tg3_flag_set(tp, ASPM_WORKAROUND);
15290 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15291 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15292 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15293 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15296 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15297 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15298 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15299 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15300 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15301 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15303 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15304 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
/* Publish final WOL capability to the PM core. */
15307 if (tg3_flag(tp, WOL_CAP))
15308 device_set_wakeup_enable(&tp->pdev->dev,
15309 tg3_flag(tp, WOL_ENABLE));
15311 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Read one 32-bit word from the APE OTP region at 'offset' into *val.
 * Takes the NVRAM lock, issues an OTP read command through the APE
 * registers, and polls (up to 100 iterations) for completion.  Returns 0
 * on success; error-return lines are missing from this extract.
 */
15314 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
/* OTP addressing is in bits, hence offset * 8. */
15317 u32 val2, off = offset * 8;
15319 err = tg3_nvram_lock(tp);
15323 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15324 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15325 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
/* Read back to flush the posted write before polling. */
15326 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15329 for (i = 0; i < 100; i++) {
15330 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15331 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15332 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15338 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15340 tg3_nvram_unlock(tp);
15341 if (val2 & APE_OTP_STATUS_CMD_DONE)
/* Kick one OTP controller command and poll for completion.
 * Returns 0 when OTP_STATUS_CMD_DONE is observed, -EBUSY on timeout.
 */
15347 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
/* Write with START to launch, then rewrite the bare command. */
15352 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15353 tw32(OTP_CTRL, cmd);
15355 /* Wait for up to 1 ms for command to execute. */
15356 for (i = 0; i < 100; i++) {
15357 val = tr32(OTP_STATUS);
15358 if (val & OTP_STATUS_CMD_DONE)
15363 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15366 /* Read the gphy configuration from the OTP region of the chip. The gphy
15367 * configuration is a 32-bit value that straddles the alignment boundary.
15368 * We do two 32-bit reads and then shift and merge the results.
15370 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15372 u32 bhalf_otp, thalf_otp;
/* Route OTP access through GRC registers. */
15374 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15376 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First read: top half of the config word. */
15379 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15381 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15384 thalf_otp = tr32(OTP_READ_DATA);
/* Second read: bottom half of the config word. */
15386 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15388 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15391 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of first read become the high half. */
15393 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Seed tp->link_config with the default autoneg advertisement: gigabit
 * modes unless the PHY is 10/100-only (1000HD also gated by the
 * DISABLE_1G_HD_ADV strap), 10/100 + TP for copper, FIBRE for serdes.
 * Speed/duplex start as UNKNOWN until autoneg resolves them.
 */
15396 static void tg3_phy_init_link_config(struct tg3 *tp)
15398 u32 adv = ADVERTISED_Autoneg;
15400 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15401 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15402 adv |= ADVERTISED_1000baseT_Half;
15403 adv |= ADVERTISED_1000baseT_Full;
15406 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15407 adv |= ADVERTISED_100baseT_Half |
15408 ADVERTISED_100baseT_Full |
15409 ADVERTISED_10baseT_Half |
15410 ADVERTISED_10baseT_Full |
15413 adv |= ADVERTISED_FIBRE;
15415 tp->link_config.advertising = adv;
15416 tp->link_config.speed = SPEED_UNKNOWN;
15417 tp->link_config.duplex = DUPLEX_UNKNOWN;
15418 tp->link_config.autoneg = AUTONEG_ENABLE;
15419 tp->link_config.active_speed = SPEED_UNKNOWN;
15420 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
/* Identify and initialize the PHY.  Order of precedence for the PHY id:
 * MII registers (unless ASF/APE firmware owns the PHY), then the id the
 * EEPROM provided in tg3_get_eeprom_hw_cfg(), then the hard-coded
 * subsystem-id table.  Also selects the per-function APE PHY lock, sets
 * EEE capability on supporting ASICs, and performs an initial reset +
 * autoneg setup when no management firmware is present.
 * NOTE(review): extract is missing interleaved lines (braces, breaks,
 * error returns) — confirm control flow against the full source.
 */
15425 static int tg3_phy_probe(struct tg3 *tp)
15427 u32 hw_phy_id_1, hw_phy_id_2;
15428 u32 hw_phy_id, hw_phy_id_masked;
15431 /* flow control autonegotiation is default behavior */
15432 tg3_flag_set(tp, PAUSE_AUTONEG);
15433 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Each PCI function uses its own APE lock for PHY access. */
15435 if (tg3_flag(tp, ENABLE_APE)) {
15436 switch (tp->pci_fn) {
15438 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15441 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15444 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15447 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
/* Without ASF these power-down-related phy flags are meaningless. */
15452 if (!tg3_flag(tp, ENABLE_ASF) &&
15453 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15454 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15455 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15456 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15458 if (tg3_flag(tp, USE_PHYLIB))
15459 return tg3_phy_init(tp);
15461 /* Reading the PHY ID register can conflict with ASF
15462 * firmware access to the PHY hardware.
15465 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15466 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15468 /* Now read the physical PHY_ID from the chip and verify
15469 * that it is sane. If it doesn't look good, we fall back
15470 * to either the hard-coded table based PHY_ID and failing
15471 * that the value found in the eeprom area.
15473 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15474 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15476 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15477 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15478 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15480 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15483 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15484 tp->phy_id = hw_phy_id;
15485 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15486 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15488 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15490 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15491 /* Do nothing, phy ID already set up in
15492 * tg3_get_eeprom_hw_cfg().
15495 struct subsys_tbl_ent *p;
15497 /* No eeprom signature? Try the hardcoded
15498 * subsys device table.
15500 p = tg3_lookup_by_subsys(tp);
15502 tp->phy_id = p->phy_id;
15503 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15504 /* For now we saw the IDs 0xbc050cd0,
15505 * 0xbc050f80 and 0xbc050c30 on devices
15506 * connected to an BCM4785 and there are
15507 * probably more. Just assume that the phy is
15508 * supported when it is connected to a SSB core
15515 tp->phy_id == TG3_PHY_ID_BCM8002)
15516 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE is supported on these copper-PHY ASIC revisions. */
15520 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15521 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15522 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15523 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15524 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15525 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15526 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15527 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15528 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15529 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15531 tp->eee.supported = SUPPORTED_100baseT_Full |
15532 SUPPORTED_1000baseT_Full;
15533 tp->eee.advertised = ADVERTISED_100baseT_Full |
15534 ADVERTISED_1000baseT_Full;
15535 tp->eee.eee_enabled = 1;
15536 tp->eee.tx_lpi_enabled = 1;
15537 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15540 tg3_phy_init_link_config(tp);
/* Only touch the PHY when no firmware (ASF/APE) manages it. */
15542 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15543 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15544 !tg3_flag(tp, ENABLE_APE) &&
15545 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR latches link-down: read twice for current status. */
15548 tg3_readphy(tp, MII_BMSR, &bmsr);
15549 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15550 (bmsr & BMSR_LSTATUS))
15551 goto skip_phy_reset;
15553 err = tg3_phy_reset(tp);
15557 tg3_phy_set_wirespeed(tp);
/* Restart autoneg if current advertisement doesn't match config. */
15559 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15560 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15561 tp->link_config.flowctrl);
15563 tg3_writephy(tp, MII_BMCR,
15564 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 needs its DSP coefficients loaded after reset. */
15569 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15570 err = tg3_init_5401phy_dsp(tp);
15574 err = tg3_init_5401phy_dsp(tp);
/*
 * tg3_read_vpd() - derive firmware-version prefix and board part number
 * from the device's PCI Vital Product Data (VPD).
 *
 * If the VPD read-only section carries manufacturer ID "1028" (Dell OEM),
 * the VENDOR0 keyword string is copied into tp->fw_ver followed by " bc "
 * so the bootcode version can be appended later.  The PARTNO keyword is
 * copied into tp->board_part_number; if absent, a chip-specific default
 * name is chosen from the ASIC revision / PCI device ID, with "none" as
 * the final fallback.
 *
 * NOTE(review): excerpt is elided -- several original lines (NULL checks,
 * goto targets, kfree of vpd_data, labels) are not visible here.
 */
15580 static void tg3_read_vpd(struct tg3 *tp)
15583 unsigned int len, vpdlen;
/* Read the entire VPD block; vpdlen receives its size in bytes. */
15586 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15590 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15591 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
/* Only the Dell manufacturer ID "1028" takes the VENDOR0 path. */
15595 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15598 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15599 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15603 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
/* "%.*s" bounds the copy to the keyword length; the trailing " bc "
 * separates the bootcode version appended by tg3_read_bc_ver() later.
 */
15604 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15607 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15608 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15610 goto out_not_found;
/* Part number must fit tp->board_part_number (TG3_BPN_SIZE bytes). */
15612 if (len > TG3_BPN_SIZE)
15613 goto out_not_found;
15615 memcpy(tp->board_part_number, &vpd_data[i], len);
/* A non-empty part number from VPD wins; otherwise fall back to the
 * per-chip defaults below.
 */
15619 if (tp->board_part_number[0])
15623 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15624 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15625 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15626 strcpy(tp->board_part_number, "BCM5717");
15627 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15628 strcpy(tp->board_part_number, "BCM5718");
15631 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15632 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15633 strcpy(tp->board_part_number, "BCM57780");
15634 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15635 strcpy(tp->board_part_number, "BCM57760");
15636 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15637 strcpy(tp->board_part_number, "BCM57790");
15638 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15639 strcpy(tp->board_part_number, "BCM57788");
15642 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15643 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15644 strcpy(tp->board_part_number, "BCM57761");
15645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15646 strcpy(tp->board_part_number, "BCM57765");
15647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15648 strcpy(tp->board_part_number, "BCM57781");
15649 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15650 strcpy(tp->board_part_number, "BCM57785");
15651 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15652 strcpy(tp->board_part_number, "BCM57791");
15653 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15654 strcpy(tp->board_part_number, "BCM57795");
15657 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15658 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15659 strcpy(tp->board_part_number, "BCM57762");
15660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15661 strcpy(tp->board_part_number, "BCM57766");
15662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15663 strcpy(tp->board_part_number, "BCM57782");
15664 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15665 strcpy(tp->board_part_number, "BCM57786");
15668 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15669 strcpy(tp->board_part_number, "BCM95906");
/* Last resort when neither VPD nor a known chip ID supplies a name. */
15672 strcpy(tp->board_part_number, "none");
/*
 * tg3_fw_img_is_valid() - sanity-check a firmware image header in NVRAM.
 *
 * The first word at @offset must carry the 0x0c000000 signature in its
 * top six bits and the following word must be readable.
 *
 * NOTE(review): the trailing length-check / return lines of the original
 * are elided from this excerpt.
 */
15676 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15680 if (tg3_nvram_read(tp, offset, &val) ||
15681 (val & 0xfc000000) != 0x0c000000 ||
15682 tg3_nvram_read(tp, offset + 4, &val) ||
/*
 * tg3_read_bc_ver() - append the bootcode version to tp->fw_ver.
 *
 * Reads the bootcode image location from NVRAM.  Newer images (word with
 * the 0x0c000000 signature) carry a 16-byte ASCII version string that is
 * copied verbatim; older images encode major/minor numbers in the
 * TG3_NVM_PTREV_BCVER word, formatted as "v%d.%02d".
 *
 * NOTE(review): excerpt is elided -- early returns and the newver branch
 * selection lines are not all visible.
 */
15689 static void tg3_read_bc_ver(struct tg3 *tp)
15691 u32 val, offset, start, ver_offset;
15693 bool newver = false;
/* NVRAM word 0xc holds the bootcode pointer, word 0x4 the image start. */
15695 if (tg3_nvram_read(tp, 0xc, &offset) ||
15696 tg3_nvram_read(tp, 0x4, &start))
15699 offset = tg3_nvram_logical_addr(tp, offset);
15701 if (tg3_nvram_read(tp, offset, &val))
/* 0x0c000000 in the top bits marks the newer image layout. */
15704 if ((val & 0xfc000000) == 0x0c000000) {
15705 if (tg3_nvram_read(tp, offset + 4, &val))
/* Append after whatever tg3_read_vpd() already placed in fw_ver. */
15712 dst_off = strlen(tp->fw_ver);
/* Need room for the 16-byte ASCII version string. */
15715 if (TG3_VER_SIZE - dst_off < 16 ||
15716 tg3_nvram_read(tp, offset + 8, &ver_offset))
15719 offset = offset + ver_offset - start;
15720 for (i = 0; i < 16; i += 4) {
15722 if (tg3_nvram_read_be32(tp, offset + i, &v))
15725 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Legacy path: major/minor packed into the BCVER pointer-table word. */
15730 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15733 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15734 TG3_NVM_BCVER_MAJSFT;
15735 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15736 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15737 "v%d.%02d", major, minor);
/*
 * tg3_read_hwsb_ver() - format the hardware self-boot version string.
 *
 * Extracts major/minor fields from the TG3_NVM_HWSB_CFG1 NVRAM word and
 * writes "sb v%d.%02d" at the start of tp->fw_ver.
 *
 * NOTE(review): the early return after a failed NVRAM read is elided from
 * this excerpt.
 */
15741 static void tg3_read_hwsb_ver(struct tg3 *tp)
15743 u32 val, major, minor;
15745 /* Use native endian representation */
15746 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15749 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15750 TG3_NVM_HWSB_CFG1_MAJSFT;
15751 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15752 TG3_NVM_HWSB_CFG1_MINSFT;
/* 32 here is the buffer bound for fw_ver -- presumably TG3_VER_SIZE;
 * TODO(review): confirm against tg3.h.
 */
15754 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/*
 * tg3_read_sb_ver() - append the self-boot (format 1) version to fw_ver.
 *
 * @val is the EEPROM magic word already read by the caller.  The per-
 * revision EDH offset is selected from the SB revision field, then the
 * build/major/minor fields are decoded and appended as " v%d.%02d" with
 * an optional trailing build letter ('a' + build - 1).
 *
 * NOTE(review): excerpt is elided -- the switch-case break statements and
 * the default/return lines are not visible here.
 */
15757 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15759 u32 offset, major, minor, build;
/* Always tag the version string as self-boot first. */
15761 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15763 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Each SB revision stores its EDH (version) word at a different offset. */
15766 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15767 case TG3_EEPROM_SB_REVISION_0:
15768 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15770 case TG3_EEPROM_SB_REVISION_2:
15771 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15773 case TG3_EEPROM_SB_REVISION_3:
15774 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15776 case TG3_EEPROM_SB_REVISION_4:
15777 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15779 case TG3_EEPROM_SB_REVISION_5:
15780 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15782 case TG3_EEPROM_SB_REVISION_6:
15783 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15789 if (tg3_nvram_read(tp, offset, &val))
15792 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15793 TG3_EEPROM_SB_EDH_BLD_SHFT;
15794 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15795 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15796 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* build maps to a letter 'a'..'z', hence the 26 bound; minor is 2 digits. */
15798 if (minor > 99 || build > 26)
15801 offset = strlen(tp->fw_ver);
15802 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15803 " v%d.%02d", major, minor);
/* Non-zero build appends a single letter if there is room left. */
15806 offset = strlen(tp->fw_ver);
15807 if (offset < TG3_VER_SIZE - 1)
15808 tp->fw_ver[offset] = 'a' + build - 1;
/*
 * tg3_read_mgmtfw_ver() - append the management (ASF) firmware version.
 *
 * Scans the NVRAM directory for an ASFINI entry, validates the firmware
 * image it points at, then copies up to 16 bytes of version text into
 * tp->fw_ver after a ", " separator, truncating at TG3_VER_SIZE.
 *
 * NOTE(review): excerpt is elided -- the directory-entry break, early
 * returns, and loop-exit lines are not all visible.
 */
15812 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15814 u32 val, offset, start;
/* Walk the fixed-size NVRAM directory looking for the ASF init entry. */
15817 for (offset = TG3_NVM_DIR_START;
15818 offset < TG3_NVM_DIR_END;
15819 offset += TG3_NVM_DIRENT_SIZE) {
15820 if (tg3_nvram_read(tp, offset, &val))
15823 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
/* Loop ran off the end: no ASF entry present. */
15827 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 chips use a fixed image base; later chips store it in the
 * word preceding the directory entry.
 */
15830 if (!tg3_flag(tp, 5705_PLUS))
15831 start = 0x08000000;
15832 else if (tg3_nvram_read(tp, offset - 4, &start))
15835 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15836 !tg3_fw_img_is_valid(tp, offset) ||
15837 tg3_nvram_read(tp, offset + 8, &val))
15840 offset += val - start;
/* Separate from the versions already accumulated in fw_ver. */
15842 vlen = strlen(tp->fw_ver);
15844 tp->fw_ver[vlen++] = ',';
15845 tp->fw_ver[vlen++] = ' ';
/* Copy up to four big-endian words (16 bytes) of version text. */
15847 for (i = 0; i < 4; i++) {
15849 if (tg3_nvram_read_be32(tp, offset, &v))
15852 offset += sizeof(v);
/* Partial copy when the buffer is nearly full, then stop. */
15854 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15855 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15859 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/*
 * tg3_probe_ncsi() - detect NC-SI support in the APE firmware.
 *
 * Verifies the APE segment signature and firmware-ready status, then sets
 * the APE_HAS_NCSI flag if the firmware advertises the NCSI feature bit.
 */
15864 static void tg3_probe_ncsi(struct tg3 *tp)
/* Bail out silently unless valid APE firmware is present and ready. */
15868 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15869 if (apedata != APE_SEG_SIG_MAGIC)
15872 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15873 if (!(apedata & APE_FW_STATUS_READY))
15876 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15877 tg3_flag_set(tp, APE_HAS_NCSI)
/*
 * tg3_read_dash_ver() - append the APE (DASH/NCSI/SMASH) firmware version.
 *
 * Reads the packed APE firmware version register and appends it to
 * tp->fw_ver as " <type> v<maj>.<min>.<rev>.<build>", where <type> is
 * chosen by NCSI capability or device ID.
 *
 * NOTE(review): the lines assigning the fwtype string ("NCSI"/"SMASH"/
 * "DASH") are elided from this excerpt.
 */
15880 static void tg3_read_dash_ver(struct tg3 *tp)
15886 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION)
15888 if (tg3_flag(tp, APE_HAS_NCSI))
15890 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15895 vlen = strlen(tp->fw_ver);
/* Unpack the four version fields from the single 32-bit register. */
15897 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15899 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15900 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15901 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15902 (apedata & APE_FW_VERSION_BLDMSK));
/*
 * tg3_read_otp_ver() - append the OTP-stored version digit (5762 only).
 *
 * Reads two OTP words, combines them into a 64-bit value, scans its bytes
 * for the last non-zero one, and appends it as " .%02d" to tp->fw_ver.
 *
 * NOTE(review): the early return for non-5762 chips and the byte-shift
 * inside the loop are elided from this excerpt.
 */
15905 static void tg3_read_otp_ver(struct tg3 *tp)
15909 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15912 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15913 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15914 TG3_OTP_MAGIC0_VALID(val)) {
/* MAGIC0 word is the high half, the following word the low half. */
15915 u64 val64 = (u64) val << 32 | val2;
15919 for (i = 0; i < 7; i++) {
15920 if ((val64 & 0xff) == 0)
15922 ver = val64 & 0xff;
15925 vlen = strlen(tp->fw_ver);
15926 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
/*
 * tg3_read_fw_ver() - assemble the full firmware version string.
 *
 * Top-level dispatcher: skips work if tp->fw_ver was already filled (e.g.
 * by VPD), then selects the bootcode / self-boot / hw-self-boot reader
 * from the EEPROM magic word, and finally appends the management or APE
 * firmware version when ASF is enabled.  Always NUL-terminates fw_ver.
 */
15930 static void tg3_read_fw_ver(struct tg3 *tp)
15933 bool vpd_vers = false;
/* fw_ver may already contain a VPD-derived prefix -- do not clobber it. */
15935 if (tp->fw_ver[0] != 0)
/* No NVRAM at all: self-boot from OTP only. */
15938 if (tg3_flag(tp, NO_NVRAM)) {
15939 strcat(tp->fw_ver, "sb");
15940 tg3_read_otp_ver(tp);
15944 if (tg3_nvram_read(tp, 0, &val))
/* Magic word selects which version-encoding scheme the NVRAM uses. */
15947 if (val == TG3_EEPROM_MAGIC)
15948 tg3_read_bc_ver(tp);
15949 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15950 tg3_read_sb_ver(tp, val);
15951 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15952 tg3_read_hwsb_ver(tp);
15954 if (tg3_flag(tp, ENABLE_ASF)) {
15955 if (tg3_flag(tp, ENABLE_APE)) {
15956 tg3_probe_ncsi(tp);
15958 tg3_read_dash_ver(tp);
15959 } else if (!vpd_vers) {
15960 tg3_read_mgmtfw_ver(tp);
/* Guarantee termination regardless of which paths appended text. */
15964 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
/*
 * tg3_rx_ret_ring_size() - RX return ring size for this chip family.
 *
 * 5717-class chips with large production rings get the biggest ring;
 * jumbo-capable non-5780-class chips the 5700 size; everything else the
 * smaller 5705 size.
 */
15967 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15969 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15970 return TG3_RX_RET_MAX_SIZE_5717;
15971 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15972 return TG3_RX_RET_MAX_SIZE_5700;
15974 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to mailbox registers; used
 * below to decide whether MBOX_WRITE_REORDER must be set.
 * NOTE(review): the empty terminator entry is elided from this excerpt.
 */
15977 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15978 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15979 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15980 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
/*
 * tg3_find_peer() - locate the sibling function of a dual-port device.
 *
 * Scans the other functions in the same PCI slot for a tg3 device that
 * is not tp->pdev.  For 5704 single-port configurations the peer falls
 * back to tp->pdev itself.
 *
 * NOTE(review): excerpt is elided -- the loop break, the fallback
 * assignment, the pci_dev_put, and the return are not visible.
 */
15984 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15986 struct pci_dev *peer;
/* Mask off the function bits to get the slot's base devfn. */
15987 unsigned int func, devnr = tp->pdev->devfn & ~7;
15989 for (func = 0; func < 8; func++) {
15990 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15991 if (peer && peer != tp->pdev)
15995 /* 5704 can be configured in single-port mode, set peer to
15996 * tp->pdev in that case.
16004 * We don't need to keep the refcount elevated; there's no way
16005 * to remove one half of this device without removing the other
/*
 * tg3_detect_asic_rev() - establish tp->pci_chip_rev_id and the chip-
 * family flags derived from it.
 *
 * The revision normally comes from the misc host control register; chips
 * using the "product ID" scheme instead read it from one of the
 * TG3PCI_*_PRODID_ASICREV config registers selected by PCI device ID.
 * The tail of the function sets cumulative family flags (5705_PLUS,
 * 5750_PLUS, 5755_PLUS, 57765_PLUS, 5780_CLASS, ...) that later code
 * keys off.
 */
16012 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16014 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16015 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16018 /* All devices that use the alternate
16019 * ASIC REV location have a CPMU.
16021 tg3_flag_set(tp, CPMU_PRESENT);
/* Gen2 devices read the revision from TG3PCI_GEN2_PRODID_ASICREV ... */
16023 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16024 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16025 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16026 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16027 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16028 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16029 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16030 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16031 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16034 reg = TG3PCI_GEN2_PRODID_ASICREV;
/* ... gen1.5 devices from TG3PCI_GEN15_PRODID_ASICREV ... */
16035 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16045 reg = TG3PCI_GEN15_PRODID_ASICREV;
/* ... everything else from the original product-ID register. */
16047 reg = TG3PCI_PRODID_ASICREV;
16049 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16052 /* Wrong chip ID in 5752 A0. This code can be removed later
16053 * as A0 is not in production.
16055 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16056 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
/* 5717 C0 is treated as 5720 A0 throughout the driver. */
16058 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16059 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
/* Family flags below are cumulative: each broader class includes the
 * narrower ones set before it.
 */
16061 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16062 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16063 tg3_asic_rev(tp) == ASIC_REV_5720)
16064 tg3_flag_set(tp, 5717_PLUS);
16066 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16067 tg3_asic_rev(tp) == ASIC_REV_57766)
16068 tg3_flag_set(tp, 57765_CLASS);
16070 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16071 tg3_asic_rev(tp) == ASIC_REV_5762)
16072 tg3_flag_set(tp, 57765_PLUS);
16074 /* Intentionally exclude ASIC_REV_5906 */
16075 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16076 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16077 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16078 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16079 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16080 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16081 tg3_flag(tp, 57765_PLUS))
16082 tg3_flag_set(tp, 5755_PLUS);
16084 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5714)
16086 tg3_flag_set(tp, 5780_CLASS);
16088 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16089 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16090 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16091 tg3_flag(tp, 5755_PLUS) ||
16092 tg3_flag(tp, 5780_CLASS))
16093 tg3_flag_set(tp, 5750_PLUS);
16095 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16096 tg3_flag(tp, 5750_PLUS))
16097 tg3_flag_set(tp, 5705_PLUS);
/*
 * tg3_10_100_only_device() - does this board support only 10/100 Mbps?
 *
 * True for certain 5703 board IDs, FET-PHY devices, and entries whose
 * pci_device_id driver_data carries the 10_100_ONLY flag (with an extra
 * 5705-specific qualifier).
 *
 * NOTE(review): the return statements are elided from this excerpt; the
 * visible conditions select the true/false paths.
 */
16100 static bool tg3_10_100_only_device(struct tg3 *tp,
16101 const struct pci_device_id *ent)
/* Board ID lives in the low bits of GRC_MISC_CFG. */
16103 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16105 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16106 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16107 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16110 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16111 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16112 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16122 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16125 u32 pci_state_reg, grc_misc_cfg;
16130 /* Force memory write invalidate off. If we leave it on,
16131 * then on 5700_BX chips we have to enable a workaround.
16132 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16133 * to match the cacheline size. The Broadcom driver have this
16134 * workaround but turns MWI off all the times so never uses
16135 * it. This seems to suggest that the workaround is insufficient.
16137 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16138 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16139 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16141 /* Important! -- Make sure register accesses are byteswapped
16142 * correctly. Also, for those chips that require it, make
16143 * sure that indirect register accesses are enabled before
16144 * the first operation.
16146 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16148 tp->misc_host_ctrl |= (misc_ctrl_reg &
16149 MISC_HOST_CTRL_CHIPREV);
16150 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16151 tp->misc_host_ctrl);
16153 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16155 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16156 * we need to disable memory and use config. cycles
16157 * only to access all registers. The 5702/03 chips
16158 * can mistakenly decode the special cycles from the
16159 * ICH chipsets as memory write cycles, causing corruption
16160 * of register and memory space. Only certain ICH bridges
16161 * will drive special cycles with non-zero data during the
16162 * address phase which can fall within the 5703's address
16163 * range. This is not an ICH bug as the PCI spec allows
16164 * non-zero address during special cycles. However, only
16165 * these ICH bridges are known to drive non-zero addresses
16166 * during special cycles.
16168 * Since special cycles do not cross PCI bridges, we only
16169 * enable this workaround if the 5703 is on the secondary
16170 * bus of these ICH bridges.
16172 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16173 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16174 static struct tg3_dev_id {
16178 } ich_chipsets[] = {
16179 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16181 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16183 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16185 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16189 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16190 struct pci_dev *bridge = NULL;
16192 while (pci_id->vendor != 0) {
16193 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16199 if (pci_id->rev != PCI_ANY_ID) {
16200 if (bridge->revision > pci_id->rev)
16203 if (bridge->subordinate &&
16204 (bridge->subordinate->number ==
16205 tp->pdev->bus->number)) {
16206 tg3_flag_set(tp, ICH_WORKAROUND);
16207 pci_dev_put(bridge);
16213 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16214 static struct tg3_dev_id {
16217 } bridge_chipsets[] = {
16218 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16219 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16222 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16223 struct pci_dev *bridge = NULL;
16225 while (pci_id->vendor != 0) {
16226 bridge = pci_get_device(pci_id->vendor,
16233 if (bridge->subordinate &&
16234 (bridge->subordinate->number <=
16235 tp->pdev->bus->number) &&
16236 (bridge->subordinate->busn_res.end >=
16237 tp->pdev->bus->number)) {
16238 tg3_flag_set(tp, 5701_DMA_BUG);
16239 pci_dev_put(bridge);
16245 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16246 * DMA addresses > 40-bit. This bridge may have other additional
16247 * 57xx devices behind it in some 4-port NIC designs for example.
16248 * Any tg3 device found behind the bridge will also need the 40-bit
16251 if (tg3_flag(tp, 5780_CLASS)) {
16252 tg3_flag_set(tp, 40BIT_DMA_BUG);
16253 tp->msi_cap = tp->pdev->msi_cap;
16255 struct pci_dev *bridge = NULL;
16258 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16259 PCI_DEVICE_ID_SERVERWORKS_EPB,
16261 if (bridge && bridge->subordinate &&
16262 (bridge->subordinate->number <=
16263 tp->pdev->bus->number) &&
16264 (bridge->subordinate->busn_res.end >=
16265 tp->pdev->bus->number)) {
16266 tg3_flag_set(tp, 40BIT_DMA_BUG);
16267 pci_dev_put(bridge);
16273 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16274 tg3_asic_rev(tp) == ASIC_REV_5714)
16275 tp->pdev_peer = tg3_find_peer(tp);
16277 /* Determine TSO capabilities */
16278 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16279 ; /* Do nothing. HW bug. */
16280 else if (tg3_flag(tp, 57765_PLUS))
16281 tg3_flag_set(tp, HW_TSO_3);
16282 else if (tg3_flag(tp, 5755_PLUS) ||
16283 tg3_asic_rev(tp) == ASIC_REV_5906)
16284 tg3_flag_set(tp, HW_TSO_2);
16285 else if (tg3_flag(tp, 5750_PLUS)) {
16286 tg3_flag_set(tp, HW_TSO_1);
16287 tg3_flag_set(tp, TSO_BUG);
16288 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16289 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16290 tg3_flag_clear(tp, TSO_BUG);
16291 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16292 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16293 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16294 tg3_flag_set(tp, FW_TSO);
16295 tg3_flag_set(tp, TSO_BUG);
16296 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16297 tp->fw_needed = FIRMWARE_TG3TSO5;
16299 tp->fw_needed = FIRMWARE_TG3TSO;
16302 /* Selectively allow TSO based on operating conditions */
16303 if (tg3_flag(tp, HW_TSO_1) ||
16304 tg3_flag(tp, HW_TSO_2) ||
16305 tg3_flag(tp, HW_TSO_3) ||
16306 tg3_flag(tp, FW_TSO)) {
16307 /* For firmware TSO, assume ASF is disabled.
16308 * We'll disable TSO later if we discover ASF
16309 * is enabled in tg3_get_eeprom_hw_cfg().
16311 tg3_flag_set(tp, TSO_CAPABLE);
16313 tg3_flag_clear(tp, TSO_CAPABLE);
16314 tg3_flag_clear(tp, TSO_BUG);
16315 tp->fw_needed = NULL;
16318 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16319 tp->fw_needed = FIRMWARE_TG3;
16321 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16322 tp->fw_needed = FIRMWARE_TG357766;
16326 if (tg3_flag(tp, 5750_PLUS)) {
16327 tg3_flag_set(tp, SUPPORT_MSI);
16328 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16329 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16330 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16331 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16332 tp->pdev_peer == tp->pdev))
16333 tg3_flag_clear(tp, SUPPORT_MSI);
16335 if (tg3_flag(tp, 5755_PLUS) ||
16336 tg3_asic_rev(tp) == ASIC_REV_5906) {
16337 tg3_flag_set(tp, 1SHOT_MSI);
16340 if (tg3_flag(tp, 57765_PLUS)) {
16341 tg3_flag_set(tp, SUPPORT_MSIX);
16342 tp->irq_max = TG3_IRQ_MAX_VECS;
16348 if (tp->irq_max > 1) {
16349 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16350 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16352 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16353 tg3_asic_rev(tp) == ASIC_REV_5720)
16354 tp->txq_max = tp->irq_max - 1;
16357 if (tg3_flag(tp, 5755_PLUS) ||
16358 tg3_asic_rev(tp) == ASIC_REV_5906)
16359 tg3_flag_set(tp, SHORT_DMA_BUG);
16361 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16362 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16364 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16365 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16366 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16367 tg3_asic_rev(tp) == ASIC_REV_5762)
16368 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16370 if (tg3_flag(tp, 57765_PLUS) &&
16371 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16372 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16374 if (!tg3_flag(tp, 5705_PLUS) ||
16375 tg3_flag(tp, 5780_CLASS) ||
16376 tg3_flag(tp, USE_JUMBO_BDFLAG))
16377 tg3_flag_set(tp, JUMBO_CAPABLE);
16379 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16382 if (pci_is_pcie(tp->pdev)) {
16385 tg3_flag_set(tp, PCI_EXPRESS);
16387 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16388 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16389 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16390 tg3_flag_clear(tp, HW_TSO_2);
16391 tg3_flag_clear(tp, TSO_CAPABLE);
16393 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16394 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16395 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16396 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16397 tg3_flag_set(tp, CLKREQ_BUG);
16398 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16399 tg3_flag_set(tp, L1PLLPD_EN);
16401 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16402 /* BCM5785 devices are effectively PCIe devices, and should
16403 * follow PCIe codepaths, but do not have a PCIe capabilities
16406 tg3_flag_set(tp, PCI_EXPRESS);
16407 } else if (!tg3_flag(tp, 5705_PLUS) ||
16408 tg3_flag(tp, 5780_CLASS)) {
16409 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16410 if (!tp->pcix_cap) {
16411 dev_err(&tp->pdev->dev,
16412 "Cannot find PCI-X capability, aborting\n");
16416 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16417 tg3_flag_set(tp, PCIX_MODE);
16420 /* If we have an AMD 762 or VIA K8T800 chipset, write
16421 * reordering to the mailbox registers done by the host
16422 * controller can cause major troubles. We read back from
16423 * every mailbox register write to force the writes to be
16424 * posted to the chip in order.
16426 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16427 !tg3_flag(tp, PCI_EXPRESS))
16428 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16430 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16431 &tp->pci_cacheline_sz);
16432 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16433 &tp->pci_lat_timer);
16434 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16435 tp->pci_lat_timer < 64) {
16436 tp->pci_lat_timer = 64;
16437 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16438 tp->pci_lat_timer);
16441 /* Important! -- It is critical that the PCI-X hw workaround
16442 * situation is decided before the first MMIO register access.
16444 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16445 /* 5700 BX chips need to have their TX producer index
16446 * mailboxes written twice to workaround a bug.
16448 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16450 /* If we are in PCI-X mode, enable register write workaround.
16452 * The workaround is to use indirect register accesses
16453 * for all chip writes not to mailbox registers.
16455 if (tg3_flag(tp, PCIX_MODE)) {
16458 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16460 /* The chip can have it's power management PCI config
16461 * space registers clobbered due to this bug.
16462 * So explicitly force the chip into D0 here.
16464 pci_read_config_dword(tp->pdev,
16465 tp->pdev->pm_cap + PCI_PM_CTRL,
16467 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16468 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16469 pci_write_config_dword(tp->pdev,
16470 tp->pdev->pm_cap + PCI_PM_CTRL,
16473 /* Also, force SERR#/PERR# in PCI command. */
16474 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16475 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16476 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16480 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16481 tg3_flag_set(tp, PCI_HIGH_SPEED);
16482 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16483 tg3_flag_set(tp, PCI_32BIT);
16485 /* Chip-specific fixup from Broadcom driver */
16486 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16487 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16488 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16489 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16492 /* Default fast path register access methods */
16493 tp->read32 = tg3_read32;
16494 tp->write32 = tg3_write32;
16495 tp->read32_mbox = tg3_read32;
16496 tp->write32_mbox = tg3_write32;
16497 tp->write32_tx_mbox = tg3_write32;
16498 tp->write32_rx_mbox = tg3_write32;
16500 /* Various workaround register access methods */
16501 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16502 tp->write32 = tg3_write_indirect_reg32;
16503 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16504 (tg3_flag(tp, PCI_EXPRESS) &&
16505 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16507 * Back to back register writes can cause problems on these
16508 * chips, the workaround is to read back all reg writes
16509 * except those to mailbox regs.
16511 * See tg3_write_indirect_reg32().
16513 tp->write32 = tg3_write_flush_reg32;
16516 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16517 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16518 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16519 tp->write32_rx_mbox = tg3_write_flush_reg32;
16522 if (tg3_flag(tp, ICH_WORKAROUND)) {
16523 tp->read32 = tg3_read_indirect_reg32;
16524 tp->write32 = tg3_write_indirect_reg32;
16525 tp->read32_mbox = tg3_read_indirect_mbox;
16526 tp->write32_mbox = tg3_write_indirect_mbox;
16527 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16528 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16533 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16534 pci_cmd &= ~PCI_COMMAND_MEMORY;
16535 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16537 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16538 tp->read32_mbox = tg3_read32_mbox_5906;
16539 tp->write32_mbox = tg3_write32_mbox_5906;
16540 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16541 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16544 if (tp->write32 == tg3_write_indirect_reg32 ||
16545 (tg3_flag(tp, PCIX_MODE) &&
16546 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16547 tg3_asic_rev(tp) == ASIC_REV_5701)))
16548 tg3_flag_set(tp, SRAM_USE_CONFIG);
16550 /* The memory arbiter has to be enabled in order for SRAM accesses
16551 * to succeed. Normally on powerup the tg3 chip firmware will make
16552 * sure it is enabled, but other entities such as system netboot
16553 * code might disable it.
16555 val = tr32(MEMARB_MODE);
16556 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16558 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16559 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16560 tg3_flag(tp, 5780_CLASS)) {
16561 if (tg3_flag(tp, PCIX_MODE)) {
16562 pci_read_config_dword(tp->pdev,
16563 tp->pcix_cap + PCI_X_STATUS,
16565 tp->pci_fn = val & 0x7;
16567 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16568 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16569 tg3_asic_rev(tp) == ASIC_REV_5720) {
16570 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16571 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16572 val = tr32(TG3_CPMU_STATUS);
16574 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16575 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16577 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16578 TG3_CPMU_STATUS_FSHFT_5719;
16581 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16582 tp->write32_tx_mbox = tg3_write_flush_reg32;
16583 tp->write32_rx_mbox = tg3_write_flush_reg32;
16586 /* Get eeprom hw config before calling tg3_set_power_state().
16587 * In particular, the TG3_FLAG_IS_NIC flag must be
16588 * determined before calling tg3_set_power_state() so that
16589 * we know whether or not to switch out of Vaux power.
16590 * When the flag is set, it means that GPIO1 is used for eeprom
16591 * write protect and also implies that it is a LOM where GPIOs
16592 * are not used to switch power.
16594 tg3_get_eeprom_hw_cfg(tp);
16596 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16597 tg3_flag_clear(tp, TSO_CAPABLE);
16598 tg3_flag_clear(tp, TSO_BUG);
16599 tp->fw_needed = NULL;
16602 if (tg3_flag(tp, ENABLE_APE)) {
16603 /* Allow reads and writes to the
16604 * APE register and memory space.
16606 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16607 PCISTATE_ALLOW_APE_SHMEM_WR |
16608 PCISTATE_ALLOW_APE_PSPACE_WR;
16609 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16612 tg3_ape_lock_init(tp);
16613 tp->ape_hb_interval =
16614 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16617 /* Set up tp->grc_local_ctrl before calling
16618 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16619 * will bring 5700's external PHY out of reset.
16620 * It is also used as eeprom write protect on LOMs.
16622 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16623 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16624 tg3_flag(tp, EEPROM_WRITE_PROT))
16625 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16626 GRC_LCLCTRL_GPIO_OUTPUT1);
16627 /* Unused GPIO3 must be driven as output on 5752 because there
16628 * are no pull-up resistors on unused GPIO pins.
16630 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16631 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16633 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16634 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16635 tg3_flag(tp, 57765_CLASS))
16636 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16638 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16640 /* Turn off the debug UART. */
16641 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16642 if (tg3_flag(tp, IS_NIC))
16643 /* Keep VMain power. */
16644 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16645 GRC_LCLCTRL_GPIO_OUTPUT0;
16648 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16649 tp->grc_local_ctrl |=
16650 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16652 /* Switch out of Vaux if it is a NIC */
16653 tg3_pwrsrc_switch_to_vmain(tp);
16655 /* Derive initial jumbo mode from MTU assigned in
16656 * ether_setup() via the alloc_etherdev() call
16658 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16659 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16661 /* Determine WakeOnLan speed to use. */
16662 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16663 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16664 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16665 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16666 tg3_flag_clear(tp, WOL_SPEED_100MB);
16668 tg3_flag_set(tp, WOL_SPEED_100MB);
16671 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16672 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16674 /* A few boards don't want Ethernet@WireSpeed phy feature */
16675 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16676 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16677 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16678 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16679 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16680 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16681 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16683 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16684 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16685 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16686 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16687 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16689 if (tg3_flag(tp, 5705_PLUS) &&
16690 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16691 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16692 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16693 !tg3_flag(tp, 57765_PLUS)) {
16694 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16695 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16696 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16697 tg3_asic_rev(tp) == ASIC_REV_5761) {
16698 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16699 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16700 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16701 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16702 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16704 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16707 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16708 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16709 tp->phy_otp = tg3_read_otp_phycfg(tp);
16710 if (tp->phy_otp == 0)
16711 tp->phy_otp = TG3_OTP_DEFAULT;
16714 if (tg3_flag(tp, CPMU_PRESENT))
16715 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16717 tp->mi_mode = MAC_MI_MODE_BASE;
16719 tp->coalesce_mode = 0;
16720 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16721 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16722 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16724 /* Set these bits to enable statistics workaround. */
16725 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16726 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16727 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16728 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16729 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16730 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16733 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16734 tg3_asic_rev(tp) == ASIC_REV_57780)
16735 tg3_flag_set(tp, USE_PHYLIB);
16737 err = tg3_mdio_init(tp);
16741 /* Initialize data/descriptor byte/word swapping. */
16742 val = tr32(GRC_MODE);
16743 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16744 tg3_asic_rev(tp) == ASIC_REV_5762)
16745 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16746 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16747 GRC_MODE_B2HRX_ENABLE |
16748 GRC_MODE_HTX2B_ENABLE |
16749 GRC_MODE_HOST_STACKUP);
16751 val &= GRC_MODE_HOST_STACKUP;
16753 tw32(GRC_MODE, val | tp->grc_mode);
16755 tg3_switch_clocks(tp);
16757 /* Clear this out for sanity. */
16758 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16760 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16761 tw32(TG3PCI_REG_BASE_ADDR, 0);
16763 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16765 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16766 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16767 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16768 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16769 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16770 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16771 void __iomem *sram_base;
16773 /* Write some dummy words into the SRAM status block
16774 * area, see if it reads back correctly. If the return
16775 * value is bad, force enable the PCIX workaround.
16777 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16779 writel(0x00000000, sram_base);
16780 writel(0x00000000, sram_base + 4);
16781 writel(0xffffffff, sram_base + 4);
16782 if (readl(sram_base) != 0x00000000)
16783 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16788 tg3_nvram_init(tp);
16790 /* If the device has an NVRAM, no need to load patch firmware */
16791 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16792 !tg3_flag(tp, NO_NVRAM))
16793 tp->fw_needed = NULL;
16795 grc_misc_cfg = tr32(GRC_MISC_CFG);
16796 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16798 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16799 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16800 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16801 tg3_flag_set(tp, IS_5788);
16803 if (!tg3_flag(tp, IS_5788) &&
16804 tg3_asic_rev(tp) != ASIC_REV_5700)
16805 tg3_flag_set(tp, TAGGED_STATUS);
16806 if (tg3_flag(tp, TAGGED_STATUS)) {
16807 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16808 HOSTCC_MODE_CLRTICK_TXBD);
16810 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16811 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16812 tp->misc_host_ctrl);
16815 /* Preserve the APE MAC_MODE bits */
16816 if (tg3_flag(tp, ENABLE_APE))
16817 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16821 if (tg3_10_100_only_device(tp, ent))
16822 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16824 err = tg3_phy_probe(tp);
16826 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16827 /* ... but do not return immediately ... */
16832 tg3_read_fw_ver(tp);
16834 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16835 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16837 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16838 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16840 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16843 /* 5700 {AX,BX} chips have a broken status block link
16844 * change bit implementation, so we must use the
16845 * status register in those cases.
16847 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16848 tg3_flag_set(tp, USE_LINKCHG_REG);
16850 tg3_flag_clear(tp, USE_LINKCHG_REG);
16852 /* The led_ctrl is set during tg3_phy_probe, here we might
16853 * have to force the link status polling mechanism based
16854 * upon subsystem IDs.
16856 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16857 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16858 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16859 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16860 tg3_flag_set(tp, USE_LINKCHG_REG);
16863 /* For all SERDES we poll the MAC status register. */
16864 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16865 tg3_flag_set(tp, POLL_SERDES);
16867 tg3_flag_clear(tp, POLL_SERDES);
16869 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16870 tg3_flag_set(tp, POLL_CPMU_LINK);
16872 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16873 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16874 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16875 tg3_flag(tp, PCIX_MODE)) {
16876 tp->rx_offset = NET_SKB_PAD;
16877 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16878 tp->rx_copy_thresh = ~(u16)0;
16882 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16883 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16884 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16886 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16888 /* Increment the rx prod index on the rx std ring by at most
16889 * 8 for these chips to workaround hw errata.
16891 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16892 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16893 tg3_asic_rev(tp) == ASIC_REV_5755)
16894 tp->rx_std_max_post = 8;
16896 if (tg3_flag(tp, ASPM_WORKAROUND))
16897 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16898 PCIE_PWR_MGMT_L1_THRESH_MSK;
/* tg3_get_device_address() - discover the NIC's permanent MAC address.
 *
 * Sources are consulted in priority order:
 *   1. platform/firmware-provided address (eth_platform_get_mac_address)
 *   2. SSB core SPROM on embedded (IS_SSB_CORE) devices
 *   3. the NIC SRAM MAC-address mailbox written by bootcode
 *   4. NVRAM at a chip/function dependent offset (mac_offset)
 *   5. the live MAC_ADDR_0_HIGH/LOW registers as a last resort
 * The result is stored into @addr (ETH_ALEN bytes) and validated with
 * is_valid_ether_addr() before the function succeeds.
 * NOTE(review): this listing has elided lines (numbering gaps), so some
 * early-return and fallback branches are not visible here.
 */
16903 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16905 u32 hi, lo, mac_offset;
16909 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
/* Embedded SSB GigE cores keep the MAC address in the SSB SPROM. */
16912 if (tg3_flag(tp, IS_SSB_CORE)) {
16913 err = ssb_gige_get_macaddr(tp->pdev, addr);
16914 if (!err && is_valid_ether_addr(addr))
/* Pick the NVRAM offset of the MAC address: dual-MAC 5704/5780-class
 * parts select per-port, 5717+ parts select per PCI function, and the
 * 5906 uses its own fixed offset (assignments partly elided here).
 */
16919 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16920 tg3_flag(tp, 5780_CLASS)) {
16921 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16923 if (tg3_nvram_lock(tp))
16924 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16926 tg3_nvram_unlock(tp);
16927 } else if (tg3_flag(tp, 5717_PLUS)) {
16928 if (tp->pci_fn & 1)
16930 if (tp->pci_fn > 1)
16931 mac_offset += 0x18c;
16932 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16935 /* First try to get it from MAC address mailbox. */
16936 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* The top 16 bits must read back 0x484b ("HK" in ASCII) for the
 * mailbox contents to be trusted as a bootcode-written address.
 */
16937 if ((hi >> 16) == 0x484b) {
16938 addr[0] = (hi >> 8) & 0xff;
16939 addr[1] = (hi >> 0) & 0xff;
16941 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16942 addr[2] = (lo >> 24) & 0xff;
16943 addr[3] = (lo >> 16) & 0xff;
16944 addr[4] = (lo >> 8) & 0xff;
16945 addr[5] = (lo >> 0) & 0xff;
16947 /* Some old bootcode may report a 0 MAC address in SRAM */
16948 addr_ok = is_valid_ether_addr(addr);
16951 /* Next, try NVRAM. */
16952 if (!tg3_flag(tp, NO_NVRAM) &&
16953 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16954 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM stores the address big-endian: the two low bytes of the
 * first word, then the whole second word.
 */
16955 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16956 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16958 /* Finally just fetch it out of the MAC control regs. */
16960 hi = tr32(MAC_ADDR_0_HIGH);
16961 lo = tr32(MAC_ADDR_0_LOW);
16963 addr[5] = lo & 0xff;
16964 addr[4] = (lo >> 8) & 0xff;
16965 addr[3] = (lo >> 16) & 0xff;
16966 addr[2] = (lo >> 24) & 0xff;
16967 addr[1] = hi & 0xff;
16968 addr[0] = (hi >> 8) & 0xff;
/* Final validation — reject anything that is not a usable unicast
 * address (error return elided in this listing).
 */
16972 if (!is_valid_ether_addr(addr))
16977 #define BOUNDARY_SINGLE_CACHELINE 1
16978 #define BOUNDARY_MULTI_CACHELINE 2
/* tg3_calc_dma_bndry() - fold DMA read/write boundary bits into @val.
 *
 * Reads the PCI cache line size from config space and, depending on the
 * bus type (plain PCI, PCI-X, or PCI Express) and a per-architecture
 * "goal" (single vs multi cache-line bursts), ORs the matching
 * DMA_RWCTRL_{READ,WRITE}_BNDRY_* bits into the DMA_RW_CTRL value.
 * Returns the updated register value.
 * NOTE(review): gaps in this listing elide the early-exit paths, the
 * case labels, and several break statements.
 */
16980 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16982 int cacheline_size;
16986 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* PCI_CACHE_LINE_SIZE is in 32-bit dwords; 0 means "not set", which is
 * treated as the 1024-byte worst case below.
 */
16988 cacheline_size = 1024;
16990 cacheline_size = (int) byte * 4;
16992 /* On 5703 and later chips, the boundary bits have no
16995 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16996 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16997 !tg3_flag(tp, PCI_EXPRESS))
/* Per-architecture burst policy: some RISC hosts prefer bursts bounded
 * to a single cache line, others tolerate multi-line bursts.
 */
17000 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17001 goal = BOUNDARY_MULTI_CACHELINE;
17003 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17004 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ chips only have a single cache-alignment disable bit. */
17010 if (tg3_flag(tp, 57765_PLUS)) {
17011 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17018 /* PCI controllers on most RISC systems tend to disconnect
17019 * when a device tries to burst across a cache-line boundary.
17020 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17022 * Unfortunately, for PCI-E there are only limited
17023 * write-side controls for this, and thus for reads
17024 * we will still get the disconnects. We'll also waste
17025 * these PCI cycles for both read and write for chips
17026 * other than 5700 and 5701 which do not implement the
17029 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
/* PCI-X: boundary encodings keyed off the host cache line size
 * (case labels elided in this listing).
 */
17030 switch (cacheline_size) {
17035 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17036 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17037 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17039 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17040 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17045 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17046 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17050 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17051 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17054 } else if (tg3_flag(tp, PCI_EXPRESS)) {
/* PCI-E: only write-side boundary control is available. */
17055 switch (cacheline_size) {
17059 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17060 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17061 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17067 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17068 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: boundary matches the cache line size when the
 * single-cache-line goal is in effect.
 */
17072 switch (cacheline_size) {
17074 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17075 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17076 DMA_RWCTRL_WRITE_BNDRY_16);
17081 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17082 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17083 DMA_RWCTRL_WRITE_BNDRY_32);
17088 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17089 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17090 DMA_RWCTRL_WRITE_BNDRY_64);
17095 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17096 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17097 DMA_RWCTRL_WRITE_BNDRY_128);
17102 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17103 DMA_RWCTRL_WRITE_BNDRY_256);
17106 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17107 DMA_RWCTRL_WRITE_BNDRY_512);
17111 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17112 DMA_RWCTRL_WRITE_BNDRY_1024);
/* tg3_do_test_dma() - run one DMA transfer through the chip's test path.
 *
 * Builds an internal buffer descriptor pointing at @buf_dma/@size, writes
 * it into NIC SRAM via the PCI memory window, kicks the read- or
 * write-DMA engine (@to_device selects direction: true = host->chip via
 * RDMAC, false = chip->host via WDMAC), then polls the completion FIFO
 * for the descriptor to come back.  Used only by tg3_test_dma().
 * NOTE(review): gaps in this listing elide the completion/timeout return
 * paths and parts of the poll loop.
 */
17121 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17122 int size, bool to_device)
17124 struct tg3_internal_buffer_desc test_desc;
17125 u32 sram_dma_descs;
17128 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA engines and completion FIFOs before the test. */
17130 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17131 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17132 tw32(RDMAC_STATUS, 0);
17133 tw32(WDMAC_STATUS, 0);
17135 tw32(BUFMGR_MODE, 0);
17136 tw32(FTQ_RESET, 0);
/* Describe the host buffer: 64-bit DMA address split high/low. */
17138 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17139 test_desc.addr_lo = buf_dma & 0xffffffff;
17140 test_desc.nic_mbuf = 0x00002100;
17141 test_desc.len = size;
17144 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17145 * the *second* time the tg3 driver was getting loaded after an
17148 * Broadcom tells me:
17149 * ...the DMA engine is connected to the GRC block and a DMA
17150 * reset may affect the GRC block in some unpredictable way...
17151 * The behavior of resets to individual blocks has not been tested.
17153 * Broadcom noted the GRC reset will also reset all sub-components.
/* Direction-specific completion-queue/send-queue ids and engine enable
 * (the to_device branch structure is partly elided in this listing).
 */
17156 test_desc.cqid_sqid = (13 << 8) | 2;
17158 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17161 test_desc.cqid_sqid = (16 << 8) | 7;
17163 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17166 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the PCI
 * config-space memory window (indirect access).
 */
17168 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17171 val = *(((u32 *)&test_desc) + i);
17172 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17173 sram_dma_descs + (i * sizeof(u32)));
17174 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17176 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Enqueue the descriptor to the appropriate high-priority DMA FIFO. */
17179 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17181 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded to 40 iterations) until the descriptor address shows up
 * in the completion FIFO; success/timeout handling elided here.
 */
17184 for (i = 0; i < 40; i++) {
17188 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17190 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17191 if ((val & 0xffff) == sram_dma_descs) {
17202 #define TEST_BUFFER_SIZE 0x2000
/* Host bridges known to expose the 5700/5701 write-DMA bug even when the
 * DMA self-test passes; their presence forces the conservative 16-byte
 * write boundary workaround in tg3_test_dma().
 */
17204 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17205 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* tg3_test_dma() - choose safe DMA_RW_CTRL settings and self-test them.
 *
 * Computes an initial dma_rwctrl value (command codes, watermarks, and
 * boundary bits via tg3_calc_dma_bndry()) based on bus type and ASIC
 * revision, then — on 5700/5701 only — DMAs a test pattern to the chip
 * and back at maximum write burst to detect the write-DMA corruption
 * bug, tightening the write boundary to 16 bytes if corruption is seen
 * or a known-bad host bridge is present.  Returns 0 on success.
 * NOTE(review): gaps in this listing elide the allocation-failure path,
 * the retry loop structure, several gotos, and the final return.
 */
17209 static int tg3_test_dma(struct tg3 *tp)
17211 dma_addr_t buf_dma;
17212 u32 *buf, saved_dma_rwctrl;
17215 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17216 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes for DMA_RW_CTRL. */
17222 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17223 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17225 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17227 if (tg3_flag(tp, 57765_PLUS))
17230 if (tg3_flag(tp, PCI_EXPRESS)) {
17231 /* DMA read watermark not used on PCIE */
17232 tp->dma_rwctrl |= 0x00180000;
17233 } else if (!tg3_flag(tp, PCIX_MODE)) {
17234 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17235 tg3_asic_rev(tp) == ASIC_REV_5750)
17236 tp->dma_rwctrl |= 0x003f0000;
17238 tp->dma_rwctrl |= 0x003f000f;
/* PCI-X mode: per-ASIC watermark and workaround selection. */
17240 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17241 tg3_asic_rev(tp) == ASIC_REV_5704) {
17242 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17243 u32 read_water = 0x7;
17245 /* If the 5704 is behind the EPB bridge, we can
17246 * do the less restrictive ONE_DMA workaround for
17247 * better performance.
17249 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17250 tg3_asic_rev(tp) == ASIC_REV_5704)
17251 tp->dma_rwctrl |= 0x8000;
17252 else if (ccval == 0x6 || ccval == 0x7)
17253 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17255 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17257 /* Set bit 23 to enable PCIX hw bug fix */
17259 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17260 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17262 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17263 /* 5780 always in PCIX mode */
17264 tp->dma_rwctrl |= 0x00144000;
17265 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17266 /* 5714 always in PCIX mode */
17267 tp->dma_rwctrl |= 0x00148000;
17269 tp->dma_rwctrl |= 0x001b000f;
17272 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17273 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
/* 5703/5704: clear the low nibble (boundary bits are repurposed). */
17275 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17276 tg3_asic_rev(tp) == ASIC_REV_5704)
17277 tp->dma_rwctrl &= 0xfffffff0;
17279 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17280 tg3_asic_rev(tp) == ASIC_REV_5701) {
17281 /* Remove this if it causes problems for some boards. */
17282 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17284 /* On 5700/5701 chips, we need to set this bit.
17285 * Otherwise the chip will issue cacheline transactions
17286 * to streamable DMA memory with not all the byte
17287 * enables turned on. This is an error on several
17288 * RISC PCI controllers, in particular sparc64.
17290 * On 5703/5704 chips, this bit has been reassigned
17291 * a different meaning. In particular, it is used
17292 * on those chips to enable a PCI-X workaround.
17294 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17297 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Only 5700/5701 need the actual loopback DMA self-test below. */
17300 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17301 tg3_asic_rev(tp) != ASIC_REV_5701)
17304 /* It is best to perform DMA test with maximum write burst size
17305 * to expose the 5700/5701 write DMA bug.
17307 saved_dma_rwctrl = tp->dma_rwctrl;
17308 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17309 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern (store elided in listing). */
17314 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17317 /* Send the buffer to the chip. */
17318 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17320 dev_err(&tp->pdev->dev,
17321 "%s: Buffer write failed. err = %d\n",
17326 /* Now read it back. */
17327 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17329 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17330 "err = %d\n", __func__, ret);
/* Verify the round-tripped pattern word by word. */
17335 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* On the first corruption, retry once with the conservative 16-byte
 * write boundary before declaring failure.
 */
17339 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17340 DMA_RWCTRL_WRITE_BNDRY_16) {
17341 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17342 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17343 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17346 dev_err(&tp->pdev->dev,
17347 "%s: Buffer corrupted on read back! "
17348 "(%d != %d)\n", __func__, p[i], i);
/* i reaching the end means every word verified: test passed. */
17354 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17360 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17361 DMA_RWCTRL_WRITE_BNDRY_16) {
17362 /* DMA test passed without adjusting DMA boundary,
17363 * now look for chipsets that are known to expose the
17364 * DMA bug without failing the test.
17366 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17367 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17368 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17370 /* Safe to use the calculated DMA boundary. */
17371 tp->dma_rwctrl = saved_dma_rwctrl;
17374 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17378 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* tg3_init_bufmgr_config() - set buffer-manager watermark defaults.
 *
 * Selects the mbuf read-DMA/MAC-RX low watermarks and high watermark
 * (standard and jumbo variants) by chip family: 57765+, 5705+ (with a
 * 5906 override), or legacy chips.  The DMA low/high watermarks are the
 * same for all families.  Pure assignment of defaults; no hardware I/O.
 * NOTE(review): the "} else {" lines between branches are elided in this
 * listing.
 */
17383 static void tg3_init_bufmgr_config(struct tg3 *tp)
17385 if (tg3_flag(tp, 57765_PLUS)) {
17386 tp->bufmgr_config.mbuf_read_dma_low_water =
17387 DEFAULT_MB_RDMA_LOW_WATER_5705;
17388 tp->bufmgr_config.mbuf_mac_rx_low_water =
17389 DEFAULT_MB_MACRX_LOW_WATER_57765;
17390 tp->bufmgr_config.mbuf_high_water =
17391 DEFAULT_MB_HIGH_WATER_57765;
17393 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17394 DEFAULT_MB_RDMA_LOW_WATER_5705;
17395 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17396 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17397 tp->bufmgr_config.mbuf_high_water_jumbo =
17398 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17399 } else if (tg3_flag(tp, 5705_PLUS)) {
17400 tp->bufmgr_config.mbuf_read_dma_low_water =
17401 DEFAULT_MB_RDMA_LOW_WATER_5705;
17402 tp->bufmgr_config.mbuf_mac_rx_low_water =
17403 DEFAULT_MB_MACRX_LOW_WATER_5705;
17404 tp->bufmgr_config.mbuf_high_water =
17405 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 uses smaller MAC-RX/high watermarks than other 5705+ parts. */
17406 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17407 tp->bufmgr_config.mbuf_mac_rx_low_water =
17408 DEFAULT_MB_MACRX_LOW_WATER_5906;
17409 tp->bufmgr_config.mbuf_high_water =
17410 DEFAULT_MB_HIGH_WATER_5906;
17413 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17414 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17415 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17416 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17417 tp->bufmgr_config.mbuf_high_water_jumbo =
17418 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy chips: original default watermarks. */
17420 tp->bufmgr_config.mbuf_read_dma_low_water =
17421 DEFAULT_MB_RDMA_LOW_WATER;
17422 tp->bufmgr_config.mbuf_mac_rx_low_water =
17423 DEFAULT_MB_MACRX_LOW_WATER;
17424 tp->bufmgr_config.mbuf_high_water =
17425 DEFAULT_MB_HIGH_WATER;
17427 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17428 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17429 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17430 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17431 tp->bufmgr_config.mbuf_high_water_jumbo =
17432 DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA watermarks are chip-independent. */
17435 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17436 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* tg3_phy_string() - map the masked PHY id to a human-readable name.
 *
 * Returns a static string for the log banner in tg3_init_one(); a PHY id
 * of 0 means an on-board SerDes, anything unrecognized reports "unknown".
 */
17439 static char *tg3_phy_string(struct tg3 *tp)
17441 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17442 case TG3_PHY_ID_BCM5400: return "5400";
17443 case TG3_PHY_ID_BCM5401: return "5401";
17444 case TG3_PHY_ID_BCM5411: return "5411";
17445 case TG3_PHY_ID_BCM5701: return "5701";
17446 case TG3_PHY_ID_BCM5703: return "5703";
17447 case TG3_PHY_ID_BCM5704: return "5704";
17448 case TG3_PHY_ID_BCM5705: return "5705";
17449 case TG3_PHY_ID_BCM5750: return "5750";
17450 case TG3_PHY_ID_BCM5752: return "5752";
17451 case TG3_PHY_ID_BCM5714: return "5714";
17452 case TG3_PHY_ID_BCM5780: return "5780";
17453 case TG3_PHY_ID_BCM5755: return "5755";
17454 case TG3_PHY_ID_BCM5787: return "5787";
17455 case TG3_PHY_ID_BCM5784: return "5784";
17456 case TG3_PHY_ID_BCM5756: return "5722/5756";
17457 case TG3_PHY_ID_BCM5906: return "5906";
17458 case TG3_PHY_ID_BCM5761: return "5761";
17459 case TG3_PHY_ID_BCM5718C: return "5718C";
17460 case TG3_PHY_ID_BCM5718S: return "5718S";
17461 case TG3_PHY_ID_BCM57765: return "57765";
17462 case TG3_PHY_ID_BCM5719C: return "5719C";
17463 case TG3_PHY_ID_BCM5720C: return "5720C";
17464 case TG3_PHY_ID_BCM5762: return "5762C";
17465 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17466 case 0: return "serdes";
17467 default: return "unknown";
/* tg3_bus_string() - format a "bus type[:speed][:width]" description.
 *
 * Fills @str for the probe banner: "PCI Express", or "PCIX:<speed>"
 * (speed decoded from TG3PCI_CLOCK_CTRL, with a board-id special case
 * for the 5704CIOBE at 133MHz), or "PCI:<speed>:<width>".
 * NOTE(review): the return statements are elided in this listing;
 * presumably @str itself is returned — confirm against the full source.
 */
17471 static char *tg3_bus_string(struct tg3 *tp, char *str)
17473 if (tg3_flag(tp, PCI_EXPRESS)) {
17474 strcpy(str, "PCI Express");
17476 } else if (tg3_flag(tp, PCIX_MODE)) {
/* Low 5 bits of CLOCK_CTRL encode the PCI-X bus speed. */
17477 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17479 strcpy(str, "PCIX:");
17481 if ((clock_ctrl == 7) ||
17482 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17483 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17484 strcat(str, "133MHz");
17485 else if (clock_ctrl == 0)
17486 strcat(str, "33MHz");
17487 else if (clock_ctrl == 2)
17488 strcat(str, "50MHz");
17489 else if (clock_ctrl == 4)
17490 strcat(str, "66MHz");
17491 else if (clock_ctrl == 6)
17492 strcat(str, "100MHz");
/* Conventional PCI: speed and bus width from feature flags. */
17494 strcpy(str, "PCI:");
17495 if (tg3_flag(tp, PCI_HIGH_SPEED))
17496 strcat(str, "66MHz");
17498 strcat(str, "33MHz");
17500 if (tg3_flag(tp, PCI_32BIT))
17501 strcat(str, ":32-bit");
17503 strcat(str, ":64-bit");
/* tg3_init_coal() - populate the default ethtool coalescing parameters.
 *
 * Zeroes tp->coal and fills in the driver defaults, then applies two
 * overrides: clear-ticks variants when the host coalescing engine runs
 * in CLRTICK mode, and zeroed IRQ/stats values on 5705+ chips (which do
 * not use those fields).  Pure software defaults; no hardware I/O.
 */
17507 static void tg3_init_coal(struct tg3 *tp)
17509 struct ethtool_coalesce *ec = &tp->coal;
17511 memset(ec, 0, sizeof(*ec));
17512 ec->cmd = ETHTOOL_GCOALESCE;
17513 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17514 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17515 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17516 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17517 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17518 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17519 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17520 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17521 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK mode (set in tg3_get_invariants for tagged-status chips)
 * needs the clear-ticks timing variants.
 */
17523 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17524 HOSTCC_MODE_CLRTICK_TXBD)) {
17525 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17526 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17527 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17528 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ chips do not use the IRQ-context or stats coalescing knobs. */
17531 if (tg3_flag(tp, 5705_PLUS)) {
17532 ec->rx_coalesce_usecs_irq = 0;
17533 ec->tx_coalesce_usecs_irq = 0;
17534 ec->stats_block_coalesce_usecs = 0;
/* tg3_init_one() - PCI probe entry point for a Tigon3 device.
 *
 * Enables and maps the PCI device, allocates the multi-queue netdev,
 * initializes software state (locks, reset work, register/APE mappings),
 * fetches chip invariants, configures DMA masks, feature flags (csum,
 * TSO, VLAN, loopback), obtains the MAC address, sets up per-vector NAPI
 * mailboxes, resets any firmware-left DMA state, runs the DMA self-test,
 * registers the netdev and the PTP clock, and logs the probe banner.
 * Errors unwind through the err_out_* labels at the bottom.
 * NOTE(review): many lines of this listing are elided (numbering gaps):
 * error-check `if (err)` wrappers, else-arms, closing braces, the
 * success return, and several unwind labels are not visible here.
 */
17538 static int tg3_init_one(struct pci_dev *pdev,
17539 const struct pci_device_id *ent)
17541 struct net_device *dev;
17544 u32 sndmbx, rcvmbx, intmbx;
17546 u64 dma_mask, persist_dma_mask;
17547 netdev_features_t features = 0;
17548 u8 addr[ETH_ALEN] __aligned(2);
/* Bring up the PCI function and claim its BARs before any MMIO. */
17550 err = pci_enable_device(pdev);
17552 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17556 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17558 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17559 goto err_out_disable_pdev;
17562 pci_set_master(pdev);
17564 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17567 goto err_out_free_res;
17570 SET_NETDEV_DEV(dev, &pdev->dev);
17572 tp = netdev_priv(dev);
17575 tp->rx_mode = TG3_DEF_RX_MODE;
17576 tp->tx_mode = TG3_DEF_TX_MODE;
17578 tp->pcierr_recovery = false;
/* msg_enable comes from the module parameter when set, else default. */
17581 tp->msg_enable = tg3_debug;
17583 tp->msg_enable = TG3_DEF_MSG_ENABLE;
/* SSB-embedded cores (e.g. on BCM47xx SoCs) need extra quirk flags. */
17585 if (pdev_is_ssb_gige_core(pdev)) {
17586 tg3_flag_set(tp, IS_SSB_CORE);
17587 if (ssb_gige_must_flush_posted_writes(pdev))
17588 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17589 if (ssb_gige_one_dma_at_once(pdev))
17590 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17591 if (ssb_gige_have_roboswitch(pdev)) {
17592 tg3_flag_set(tp, USE_PHYLIB);
17593 tg3_flag_set(tp, ROBOSWITCH);
17595 if (ssb_gige_is_rgmii(pdev))
17596 tg3_flag_set(tp, RGMII_MODE);
17599 /* The word/byte swap controls here control register access byte
17600 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17603 tp->misc_host_ctrl =
17604 MISC_HOST_CTRL_MASK_PCI_INT |
17605 MISC_HOST_CTRL_WORD_SWAP |
17606 MISC_HOST_CTRL_INDIR_ACCESS |
17607 MISC_HOST_CTRL_PCISTATE_RW;
17609 /* The NONFRM (non-frame) byte/word swap controls take effect
17610 * on descriptor entries, anything which isn't packet data.
17612 * The StrongARM chips on the board (one for tx, one for rx)
17613 * are running in big-endian mode.
17615 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17616 GRC_MODE_WSWAP_NONFRM_DATA);
17617 #ifdef __BIG_ENDIAN
17618 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17620 spin_lock_init(&tp->lock);
17621 spin_lock_init(&tp->indirect_lock);
17622 INIT_WORK(&tp->reset_task, tg3_reset_task);
17624 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17626 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17628 goto err_out_free_dev;
/* Devices with an APE (management processor) expose it via BAR 2. */
17631 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17632 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17633 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17635 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17636 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17637 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17640 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17646 tg3_flag_set(tp, ENABLE_APE);
17647 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17648 if (!tp->aperegs) {
17649 dev_err(&pdev->dev,
17650 "Cannot map APE registers, aborting\n");
17652 goto err_out_iounmap;
17656 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17657 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17659 dev->ethtool_ops = &tg3_ethtool_ops;
17660 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17661 dev->netdev_ops = &tg3_netdev_ops;
17662 dev->irq = pdev->irq;
17664 err = tg3_get_invariants(tp, ent);
17666 dev_err(&pdev->dev,
17667 "Problem fetching invariants of chip, aborting\n");
17668 goto err_out_apeunmap;
17671 /* The EPB bridge inside 5714, 5715, and 5780 and any
17672 * device behind the EPB cannot support DMA addresses > 40-bit.
17673 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17674 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17675 * do DMA address check in tg3_start_xmit().
17677 if (tg3_flag(tp, IS_5788))
17678 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17679 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17680 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17681 #ifdef CONFIG_HIGHMEM
17682 dma_mask = DMA_BIT_MASK(64);
17685 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17687 /* Configure DMA attributes. */
17688 if (dma_mask > DMA_BIT_MASK(32)) {
17689 err = dma_set_mask(&pdev->dev, dma_mask);
17691 features |= NETIF_F_HIGHDMA;
17692 err = dma_set_coherent_mask(&pdev->dev,
17695 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17696 "DMA for consistent allocations\n");
17697 goto err_out_apeunmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
17701 if (err || dma_mask == DMA_BIT_MASK(32)) {
17702 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17704 dev_err(&pdev->dev,
17705 "No usable DMA configuration, aborting\n");
17706 goto err_out_apeunmap;
17710 tg3_init_bufmgr_config(tp);
17712 /* 5700 B0 chips do not support checksumming correctly due
17713 * to hardware bugs.
17715 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17716 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17718 if (tg3_flag(tp, 5755_PLUS))
17719 features |= NETIF_F_IPV6_CSUM;
17722 /* TSO is on by default on chips that support hardware TSO.
17723 * Firmware TSO on older chips gives lower performance, so it
17724 * is off by default, but can be enabled using ethtool.
17726 if ((tg3_flag(tp, HW_TSO_1) ||
17727 tg3_flag(tp, HW_TSO_2) ||
17728 tg3_flag(tp, HW_TSO_3)) &&
17729 (features & NETIF_F_IP_CSUM))
17730 features |= NETIF_F_TSO;
17731 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17732 if (features & NETIF_F_IPV6_CSUM)
17733 features |= NETIF_F_TSO6;
17734 if (tg3_flag(tp, HW_TSO_3) ||
17735 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17736 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17737 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17738 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17739 tg3_asic_rev(tp) == ASIC_REV_57780)
17740 features |= NETIF_F_TSO_ECN;
17743 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17744 NETIF_F_HW_VLAN_CTAG_RX;
17745 dev->vlan_features |= features;
17748 * Add loopback capability only for a subset of devices that support
17749 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17750 * loopback for the remaining devices.
17752 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17753 !tg3_flag(tp, CPMU_PRESENT))
17754 /* Add the loopback capability */
17755 features |= NETIF_F_LOOPBACK;
17757 dev->hw_features |= features;
17758 dev->priv_flags |= IFF_UNICAST_FLT;
17760 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17761 dev->min_mtu = TG3_MIN_MTU;
17762 dev->max_mtu = TG3_MAX_MTU(tp);
/* Slow-bus 5705 A1 without TSO: shrink the RX ring (hw limitation). */
17764 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17765 !tg3_flag(tp, TSO_CAPABLE) &&
17766 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17767 tg3_flag_set(tp, MAX_RXPEND_64);
17768 tp->rx_pending = 63;
17771 err = tg3_get_device_address(tp, addr);
17773 dev_err(&pdev->dev,
17774 "Could not obtain valid ethernet address, aborting\n");
17775 goto err_out_apeunmap;
17777 eth_hw_addr_set(dev, addr);
/* Assign interrupt/producer/consumer mailbox registers per vector. */
17779 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17780 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17781 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17782 for (i = 0; i < tp->irq_max; i++) {
17783 struct tg3_napi *tnapi = &tp->napi[i];
17786 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17788 tnapi->int_mbox = intmbx;
17794 tnapi->consmbox = rcvmbx;
17795 tnapi->prodmbox = sndmbx;
/* Vector 0 uses the global "now" coalescing bit; later vectors use
 * their own per-vector bits.
 */
17798 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17800 tnapi->coal_now = HOSTCC_MODE_NOW;
17802 if (!tg3_flag(tp, SUPPORT_MSIX))
17806 * If we support MSIX, we'll be using RSS. If we're using
17807 * RSS, the first vector only handles link interrupts and the
17808 * remaining vectors handle rx and tx interrupts. Reuse the
17809 * mailbox values for the next iteration. The values we setup
17810 * above are still useful for the single vectored mode.
17824 * Reset chip in case UNDI or EFI driver did not shutdown
17825 * DMA self test will enable WDMAC and we'll see (spurious)
17826 * pending DMA on the PCI bus at that point.
17828 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17829 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17830 tg3_full_lock(tp, 0);
17831 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17832 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17833 tg3_full_unlock(tp);
17836 err = tg3_test_dma(tp);
17838 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17839 goto err_out_apeunmap;
17844 pci_set_drvdata(pdev, dev);
17846 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17847 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17848 tg3_asic_rev(tp) == ASIC_REV_5762)
17849 tg3_flag_set(tp, PTP_CAPABLE);
17851 tg3_timer_init(tp);
17853 tg3_carrier_off(tp);
17855 err = register_netdev(dev);
17857 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17858 goto err_out_apeunmap;
/* Registration failure of the PTP clock is non-fatal: just drop it. */
17861 if (tg3_flag(tp, PTP_CAPABLE)) {
17863 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17865 if (IS_ERR(tp->ptp_clock))
17866 tp->ptp_clock = NULL;
/* Probe banner: part number, chip rev, bus description, MAC. */
17869 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17870 tp->board_part_number,
17871 tg3_chip_rev_id(tp),
17872 tg3_bus_string(tp, str),
17875 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17878 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17879 ethtype = "10/100Base-TX";
17880 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17881 ethtype = "1000Base-SX";
17883 ethtype = "10/100/1000Base-T";
17885 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17886 "(WireSpeed[%d], EEE[%d])\n",
17887 tg3_phy_string(tp), ethtype,
17888 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17889 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17892 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17893 (dev->features & NETIF_F_RXCSUM) != 0,
17894 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17895 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17896 tg3_flag(tp, ENABLE_ASF) != 0,
17897 tg3_flag(tp, TSO_CAPABLE) != 0);
17898 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17900 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17901 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17903 pci_save_state(pdev);
/* Error unwind labels: release resources in reverse acquisition order
 * (several labels between these lines are elided in this listing).
 */
17909 iounmap(tp->aperegs);
17910 tp->aperegs = NULL;
17923 pci_release_regions(pdev);
17925 err_out_disable_pdev:
17926 if (pci_is_enabled(pdev))
17927 pci_disable_device(pdev);
/*
 * PCI .remove callback: unwind everything tg3_init_one() set up for one
 * device.  NOTE(review): this excerpt is elided -- braces and guard
 * conditions (e.g. an "if (dev)" check implied by the numbering gaps)
 * are not visible here; verify against the full source.
 */
17931 static void tg3_remove_one(struct pci_dev *pdev)
17933 struct net_device *dev = pci_get_drvdata(pdev);
17936 struct tg3 *tp = netdev_priv(dev);
/* Drop the firmware image (if any) obtained at probe time. */
17940 release_firmware(tp->fw);
/* Make sure the deferred reset worker is not, and will not be, running. */
17942 tg3_reset_task_cancel(tp);
/* PHY-lib teardown body elided in this excerpt. */
17944 if (tg3_flag(tp, USE_PHYLIB)) {
17949 unregister_netdev(dev);
/* Unmap the APE register window and clear the stale pointer. */
17951 iounmap(tp->aperegs);
17952 tp->aperegs = NULL;
/* Release PCI BARs and disable the device last. */
17959 pci_release_regions(pdev);
17960 pci_disable_device(pdev);
17964 #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend callback: quiesce the NIC and prepare it for a
 * low-power state.  NOTE(review): the error/early-return branch structure
 * between the numbered lines is elided in this excerpt.
 */
17965 static int tg3_suspend(struct device *device)
17967 struct net_device *dev = dev_get_drvdata(device);
17968 struct tg3 *tp = netdev_priv(dev);
/* Interface never brought up: nothing to quiesce. */
17973 if (!netif_running(dev))
/* Cancel the reset worker first so it cannot race the teardown below. */
17976 tg3_reset_task_cancel(tp);
17978 tg3_netif_stop(tp);
17980 tg3_timer_stop(tp);
/* Second lock argument is 1 here vs. 0 below -- presumably requests IRQ
 * synchronization before masking interrupts; confirm against tg3_full_lock(). */
17982 tg3_full_lock(tp, 1);
17983 tg3_disable_ints(tp);
17984 tg3_full_unlock(tp);
17986 netif_device_detach(dev);
/* Halt the chip; INIT_COMPLETE is cleared so resume does a full re-init. */
17988 tg3_full_lock(tp, 0);
17989 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17990 tg3_flag_clear(tp, INIT_COMPLETE);
17991 tg3_full_unlock(tp);
17993 err = tg3_power_down_prepare(tp);
/*
 * Recovery path (taken when tg3_power_down_prepare() fails; the branch
 * itself is elided here): restart the hardware and networking so the
 * device stays usable even though the suspend is aborted.
 */
17997 tg3_full_lock(tp, 0);
17999 tg3_flag_set(tp, INIT_COMPLETE);
18000 err2 = tg3_restart_hw(tp, true);
18004 tg3_timer_start(tp);
18006 netif_device_attach(dev);
18007 tg3_netif_start(tp);
18010 tg3_full_unlock(tp);
/*
 * System-sleep resume callback: re-initialize the NIC after suspend.
 * NOTE(review): early-return and error-handling lines are elided in
 * this excerpt.
 */
18021 static int tg3_resume(struct device *device)
18023 struct net_device *dev = dev_get_drvdata(device);
18024 struct tg3 *tp = netdev_priv(dev);
/* Interface was down at suspend: nothing to restore. */
18029 if (!netif_running(dev))
18032 netif_device_attach(dev);
18034 tg3_full_lock(tp, 0);
/* Notify the APE management firmware that the driver is re-initializing. */
18036 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18038 tg3_flag_set(tp, INIT_COMPLETE);
/* Second argument presumably selects a PHY reset -- skipped when the link
 * was kept alive across power-down; confirm against tg3_restart_hw(). */
18039 err = tg3_restart_hw(tp,
18040 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18044 tg3_timer_start(tp);
18046 tg3_netif_start(tp);
18049 tg3_full_unlock(tp);
18058 #endif /* CONFIG_PM_SLEEP */
/* Wire the suspend/resume callbacks into a dev_pm_ops table; the macro
 * only references them when CONFIG_PM_SLEEP is set, matching the #ifdef. */
18060 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
/*
 * PCI .shutdown callback: quiesce the NIC for reboot/power-off.
 * NOTE(review): the body of the netif_running() branch is elided in
 * this excerpt -- presumably it stops the interface; confirm.
 */
18062 static void tg3_shutdown(struct pci_dev *pdev)
18064 struct net_device *dev = pci_get_drvdata(pdev);
18065 struct tg3 *tp = netdev_priv(dev);
18068 netif_device_detach(dev);
18070 if (netif_running(dev))
/* Only power the chip down on a real power-off, not on a reboot. */
18073 if (system_state == SYSTEM_POWER_OFF)
18074 tg3_power_down(tp);
18080 * tg3_io_error_detected - called when PCI error is detected
18081 * @pdev: Pointer to PCI device
18082 * @state: The current pci connection state
18084 * This function is called after a PCI bus error affecting
18085 * this device has been detected.
/*
 * NOTE(review): early-return targets and some braces between the numbered
 * lines are elided in this excerpt.
 */
18087 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18088 pci_channel_state_t state)
18090 struct net_device *netdev = pci_get_drvdata(pdev);
18091 struct tg3 *tp = netdev_priv(netdev);
/* Default answer: ask the PCI core to reset the slot. */
18092 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18094 netdev_info(netdev, "PCI I/O error detected\n");
18098 /* Could be second call or maybe we don't have netdev yet */
18099 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18102 /* We needn't recover from permanent error */
/* Frozen channel: flag recovery in progress so a re-entry bails out above. */
18103 if (state == pci_channel_io_frozen)
18104 tp->pcierr_recovery = true;
18108 tg3_netif_stop(tp);
18110 tg3_timer_stop(tp);
18112 /* Want to make sure that the reset task doesn't run */
18113 tg3_reset_task_cancel(tp);
18115 netif_device_detach(netdev);
18117 /* Clean up software state, even if MMIO is blocked */
18118 tg3_full_lock(tp, 0);
18119 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18120 tg3_full_unlock(tp);
/* Permanent failure: no slot reset will help -- re-enable NAPI so the
 * stack is consistent, report disconnect, and disable the device. */
18123 if (state == pci_channel_io_perm_failure) {
18125 tg3_napi_enable(tp);
18128 err = PCI_ERS_RESULT_DISCONNECT;
18130 pci_disable_device(pdev);
18139 * tg3_io_slot_reset - called after the pci bus has been reset.
18140 * @pdev: Pointer to PCI device
18142 * Restart the card from scratch, as if from a cold-boot.
18143 * At this point, the card has experienced a hard reset,
18144 * followed by fixups by BIOS, and has its config space
18145 * set up identically to what it was at cold boot.
/*
 * NOTE(review): goto labels and some error branches between the numbered
 * lines are elided in this excerpt.
 */
18147 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18149 struct net_device *netdev = pci_get_drvdata(pdev);
18150 struct tg3 *tp = netdev_priv(netdev);
/* Pessimistic default: report disconnect unless recovery succeeds below. */
18151 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18156 if (pci_enable_device(pdev)) {
18157 dev_err(&pdev->dev,
18158 "Cannot re-enable PCI device after reset.\n")
/* Restore the saved config space, then re-save it as the new baseline. */
18162 pci_set_master(pdev);
18163 pci_restore_state(pdev);
18164 pci_save_state(pdev);
/* Interface down (or no netdev): nothing further to bring back up. */
18166 if (!netdev || !netif_running(netdev)) {
18167 rc = PCI_ERS_RESULT_RECOVERED;
18171 err = tg3_power_up(tp);
18175 rc = PCI_ERS_RESULT_RECOVERED;
/* On failure with a running interface, re-enable NAPI so the stack is
 * left in a consistent state before reporting disconnect. */
18178 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18179 tg3_napi_enable(tp);
18188 * tg3_io_resume - called when traffic can start flowing again.
18189 * @pdev: Pointer to PCI device
18191 * This callback is called when the error recovery driver tells
18192 * us that its OK to resume normal operation.
/*
 * NOTE(review): the "if (err)" branch around the error message and the
 * exit labels are elided in this excerpt.
 */
18194 static void tg3_io_resume(struct pci_dev *pdev)
18196 struct net_device *netdev = pci_get_drvdata(pdev);
18197 struct tg3 *tp = netdev_priv(netdev);
18202 if (!netdev || !netif_running(netdev))
18205 tg3_full_lock(tp, 0);
/* Notify the APE firmware of re-init, then restart the hardware. */
18206 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18207 tg3_flag_set(tp, INIT_COMPLETE);
18208 err = tg3_restart_hw(tp, true);
/* Failure path (guard elided): unlock before logging and bailing out. */
18210 tg3_full_unlock(tp);
18211 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18215 netif_device_attach(netdev);
18217 tg3_timer_start(tp);
18219 tg3_netif_start(tp);
18221 tg3_full_unlock(tp);
/* Recovery finished: allow a future error_detected call to run again. */
18226 tp->pcierr_recovery = false;
/* PCI error-recovery (AER/EEH) callbacks for this driver. */
18230 static const struct pci_error_handlers tg3_err_handler = {
18231 .error_detected = tg3_io_error_detected,
18232 .slot_reset = tg3_io_slot_reset,
18233 .resume = tg3_io_resume
/* PCI driver descriptor: ties together the device ID table, probe/remove,
 * power management, shutdown, and error-recovery entry points. */
18236 static struct pci_driver tg3_driver = {
18237 .name = DRV_MODULE_NAME,
18238 .id_table = tg3_pci_tbl,
18239 .probe = tg3_init_one,
18240 .remove = tg3_remove_one,
18241 .err_handler = &tg3_err_handler,
18242 .driver.pm = &tg3_pm_ops,
18243 .shutdown = tg3_shutdown,
/* Generates the module init/exit that simply (un)register tg3_driver. */
18246 module_pci_driver(tg3_driver);