2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/stringify.h>
18 #include <linux/kernel.h>
19 #include <linux/sched/signal.h>
20 #include <linux/types.h>
21 #include <linux/compiler.h>
22 #include <linux/slab.h>
23 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/ioport.h>
27 #include <linux/pci.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/ethtool.h>
32 #include <linux/mdio.h>
33 #include <linux/mii.h>
34 #include <linux/phy.h>
35 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
44 #include <linux/ssb/ssb_driver_gige.h>
45 #include <linux/hwmon.h>
46 #include <linux/hwmon-sysfs.h>
47 #include <linux/crc32poly.h>
49 #include <net/checksum.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
/* Convenience wrappers: paste a TG3_FLAG_ suffix onto the flag enum and
 * dispatch to the _tg3_flag*() bitop helpers on tp->tg3_flags.
 */
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
90 /* DO NOT UPDATE TG3_*_NUM defines */
92 #define TG3_MIN_NUM 137
/* Reset "kind" values passed to the firmware/APE state-change helpers. */
94 #define RESET_KIND_SHUTDOWN 0
95 #define RESET_KIND_INIT 1
96 #define RESET_KIND_SUSPEND 2
98 #define TG3_DEF_RX_MODE 0
99 #define TG3_DEF_TX_MODE 0
100 #define TG3_DEF_MSG_ENABLE \
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
112 /* length of time before we decide the hardware is borked,
113 * and dev->tx_timeout() should be called to fix the problem
116 #define TG3_TX_TIMEOUT (5 * HZ)
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU ETH_ZLEN
120 #define TG3_MAX_MTU(tp) \
121 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124 * You can't change the ring sizes, but you can change where you place
125 * them in the NIC onboard memory.
127 #define TG3_RX_STD_RING_SIZE(tp) \
128 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING 200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
136 /* Do not place this n-ring entries value into the tp struct itself,
137 * we really want to expose these constants to GCC so that modulo et
138 * al. operations are done with shifts and masks instead of with
139 * hw multiply/modulo instructions. Another solution would be to
140 * replace things like '% foo' with '& (foo - 1)'.
143 #define TG3_TX_RING_SIZE 512
144 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
/* Byte sizes of the DMA rings, derived from the entry counts above. */
146 #define TG3_RX_STD_RING_BYTES(tp) \
147 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
148 #define TG3_RX_JMB_RING_BYTES(tp) \
149 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
150 #define TG3_RX_RCB_RING_BYTES(tp) \
151 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
152 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
154 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
156 #define TG3_DMA_BYTE_ENAB 64
158 #define TG3_RX_STD_DMA_SZ 1536
159 #define TG3_RX_JMB_DMA_SZ 9046
161 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
163 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
164 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
167 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
170 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
173 * that are at least dword aligned when used in PCIX mode. The driver
174 * works around this bug by double copying the packet. This workaround
175 * is built into the normal double copy length check for efficiency.
177 * However, the double copy is only necessary on those architectures
178 * where unaligned memory accesses are inefficient. For those architectures
179 * where unaligned memory accesses incur little penalty, we can reintegrate
180 * the 5701 in the normal rx path. Doing so saves a device structure
181 * dereference by hardcoding the double copy threshold in place.
183 #define TG3_RX_COPY_THRESHOLD 256
184 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
185 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
187 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
190 #if (NET_IP_ALIGN != 0)
191 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
193 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
196 /* minimum number of free TX descriptors required to wake up TX process */
197 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
198 #define TG3_TX_BD_DMA_MAX_2K 2048
199 #define TG3_TX_BD_DMA_MAX_4K 4096
201 #define TG3_RAW_IP_ALIGN 2
203 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
204 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
206 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
207 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
/* Firmware image names; "(DEBLOBBED)" placeholders come from the
 * linux-libre deblobbing process — do not edit.
 */
209 #define FIRMWARE_TG3 "/*(DEBLOBBED)*/"
210 #define FIRMWARE_TG357766 "/*(DEBLOBBED)*/"
211 #define FIRMWARE_TG3TSO "/*(DEBLOBBED)*/"
212 #define FIRMWARE_TG3TSO5 "/*(DEBLOBBED)*/"
214 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
215 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
216 MODULE_LICENSE("GPL");
/* Debug message bitmask; -1 selects the driver default at probe time. */
219 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
220 module_param(tg3_debug, int, 0);
221 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* Per-device-ID quirk flags carried in pci_device_id.driver_data. */
223 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
224 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
/* PCI IDs this driver binds to; driver_data carries the
 * TG3_DRV_DATA_FLAG_* quirk bits for 10/100-only parts.
 */
226 static const struct pci_device_id tg3_pci_tbl[] = {
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
246 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
247 TG3_DRV_DATA_FLAG_5705_10_100},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
249 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
250 TG3_DRV_DATA_FLAG_5705_10_100},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
253 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
254 TG3_DRV_DATA_FLAG_5705_10_100},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
261 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
267 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
275 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
276 PCI_VENDOR_ID_LENOVO,
277 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
281 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
300 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
301 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
302 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
303 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
304 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
305 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
309 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
321 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
334 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
335 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
336 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
337 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
338 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
339 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
340 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
341 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
345 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported to ethtool -S; entry order must mirror the hardware
 * statistics block layout, so do not reorder.
 */
347 static const struct {
348 const char string[ETH_GSTRING_LEN];
349 } ethtool_stats_keys[] = {
352 { "rx_ucast_packets" },
353 { "rx_mcast_packets" },
354 { "rx_bcast_packets" },
356 { "rx_align_errors" },
357 { "rx_xon_pause_rcvd" },
358 { "rx_xoff_pause_rcvd" },
359 { "rx_mac_ctrl_rcvd" },
360 { "rx_xoff_entered" },
361 { "rx_frame_too_long_errors" },
363 { "rx_undersize_packets" },
364 { "rx_in_length_errors" },
365 { "rx_out_length_errors" },
366 { "rx_64_or_less_octet_packets" },
367 { "rx_65_to_127_octet_packets" },
368 { "rx_128_to_255_octet_packets" },
369 { "rx_256_to_511_octet_packets" },
370 { "rx_512_to_1023_octet_packets" },
371 { "rx_1024_to_1522_octet_packets" },
372 { "rx_1523_to_2047_octet_packets" },
373 { "rx_2048_to_4095_octet_packets" },
374 { "rx_4096_to_8191_octet_packets" },
375 { "rx_8192_to_9022_octet_packets" },
382 { "tx_flow_control" },
384 { "tx_single_collisions" },
385 { "tx_mult_collisions" },
387 { "tx_excessive_collisions" },
388 { "tx_late_collisions" },
389 { "tx_collide_2times" },
390 { "tx_collide_3times" },
391 { "tx_collide_4times" },
392 { "tx_collide_5times" },
393 { "tx_collide_6times" },
394 { "tx_collide_7times" },
395 { "tx_collide_8times" },
396 { "tx_collide_9times" },
397 { "tx_collide_10times" },
398 { "tx_collide_11times" },
399 { "tx_collide_12times" },
400 { "tx_collide_13times" },
401 { "tx_collide_14times" },
402 { "tx_collide_15times" },
403 { "tx_ucast_packets" },
404 { "tx_mcast_packets" },
405 { "tx_bcast_packets" },
406 { "tx_carrier_sense_errors" },
410 { "dma_writeq_full" },
411 { "dma_write_prioq_full" },
415 { "rx_threshold_hit" },
417 { "dma_readq_full" },
418 { "dma_read_prioq_full" },
419 { "tx_comp_queue_full" },
421 { "ring_set_send_prod_index" },
422 { "ring_status_update" },
424 { "nic_avoided_irqs" },
425 { "nic_tx_threshold_hit" },
427 { "mbuf_lwm_thresh_hit" },
430 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
/* Self-test indices; used as designators into ethtool_test_keys below,
 * so the two lists stay in sync by construction.
 */
431 #define TG3_NVRAM_TEST 0
432 #define TG3_LINK_TEST 1
433 #define TG3_REGISTER_TEST 2
434 #define TG3_MEMORY_TEST 3
435 #define TG3_MAC_LOOPB_TEST 4
436 #define TG3_PHY_LOOPB_TEST 5
437 #define TG3_EXT_LOOPB_TEST 6
438 #define TG3_INTERRUPT_TEST 7
441 static const struct {
442 const char string[ETH_GSTRING_LEN];
443 } ethtool_test_keys[] = {
444 [TG3_NVRAM_TEST] = { "nvram test (online) " },
445 [TG3_LINK_TEST] = { "link test (online) " },
446 [TG3_REGISTER_TEST] = { "register test (offline)" },
447 [TG3_MEMORY_TEST] = { "memory test (offline)" },
448 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
449 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
450 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
451 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
454 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
457 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
459 writel(val, tp->regs + off);
462 static u32 tg3_read32(struct tg3 *tp, u32 off)
464 return readl(tp->regs + off);
467 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
469 writel(val, tp->aperegs + off);
472 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
474 return readl(tp->aperegs + off);
477 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
481 spin_lock_irqsave(&tp->indirect_lock, flags);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
483 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
484 spin_unlock_irqrestore(&tp->indirect_lock, flags);
487 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
489 writel(val, tp->regs + off);
490 readl(tp->regs + off);
493 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
498 spin_lock_irqsave(&tp->indirect_lock, flags);
499 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
500 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
501 spin_unlock_irqrestore(&tp->indirect_lock, flags);
505 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
509 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
510 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
511 TG3_64BIT_REG_LOW, val);
514 if (off == TG3_RX_STD_PROD_IDX_REG) {
515 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
516 TG3_64BIT_REG_LOW, val);
520 spin_lock_irqsave(&tp->indirect_lock, flags);
521 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
522 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
523 spin_unlock_irqrestore(&tp->indirect_lock, flags);
525 /* In indirect mode when disabling interrupts, we also need
526 * to clear the interrupt bit in the GRC local ctrl register.
528 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
530 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
531 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
535 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
540 spin_lock_irqsave(&tp->indirect_lock, flags);
541 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
542 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
543 spin_unlock_irqrestore(&tp->indirect_lock, flags);
547 /* usec_wait specifies the wait time in usec when writing to certain registers
548 * where it is unsafe to read back the register without some delay.
549 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
550 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
552 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
554 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
555 /* Non-posted methods */
556 tp->write32(tp, off, val);
559 tg3_write32(tp, off, val);
564 /* Wait again after the read for the posted method to guarantee that
565 * the wait time is met.
571 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
573 tp->write32_mbox(tp, off, val);
574 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
575 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
576 !tg3_flag(tp, ICH_WORKAROUND)))
577 tp->read32_mbox(tp, off);
580 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
582 void __iomem *mbox = tp->regs + off;
584 if (tg3_flag(tp, TXD_MBOX_HWBUG))
586 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
587 tg3_flag(tp, FLUSH_POSTED_WRITES))
591 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
593 return readl(tp->regs + off + GRCMBOX_BASE);
596 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
598 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Short-hand register accessors; all dispatch through the per-chip
 * method pointers in struct tg3 (bound at probe time).
 */
601 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
602 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
603 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
604 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
605 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
607 #define tw32(reg, val) tp->write32(tp, reg, val)
608 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
609 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
610 #define tr32(reg) tp->read32(tp, reg)
612 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
616 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
617 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
620 spin_lock_irqsave(&tp->indirect_lock, flags);
621 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
622 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
623 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
625 /* Always leave this as zero. */
626 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
628 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
629 tw32_f(TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 spin_unlock_irqrestore(&tp->indirect_lock, flags);
637 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
641 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
642 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
647 spin_lock_irqsave(&tp->indirect_lock, flags);
648 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
649 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
650 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
652 /* Always leave this as zero. */
653 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
655 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
656 *val = tr32(TG3PCI_MEM_WIN_DATA);
658 /* Always leave this as zero. */
659 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 spin_unlock_irqrestore(&tp->indirect_lock, flags);
664 static void tg3_ape_lock_init(struct tg3 *tp)
669 if (tg3_asic_rev(tp) == ASIC_REV_5761)
670 regbase = TG3_APE_LOCK_GRANT;
672 regbase = TG3_APE_PER_LOCK_GRANT;
674 /* Make sure the driver hasn't any stale locks. */
675 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
677 case TG3_APE_LOCK_PHY0:
678 case TG3_APE_LOCK_PHY1:
679 case TG3_APE_LOCK_PHY2:
680 case TG3_APE_LOCK_PHY3:
681 bit = APE_LOCK_GRANT_DRIVER;
685 bit = APE_LOCK_GRANT_DRIVER;
687 bit = 1 << tp->pci_fn;
689 tg3_ape_write32(tp, regbase + 4 * i, bit);
694 static int tg3_ape_lock(struct tg3 *tp, int locknum)
698 u32 status, req, gnt, bit;
700 if (!tg3_flag(tp, ENABLE_APE))
704 case TG3_APE_LOCK_GPIO:
705 if (tg3_asic_rev(tp) == ASIC_REV_5761)
708 case TG3_APE_LOCK_GRC:
709 case TG3_APE_LOCK_MEM:
711 bit = APE_LOCK_REQ_DRIVER;
713 bit = 1 << tp->pci_fn;
715 case TG3_APE_LOCK_PHY0:
716 case TG3_APE_LOCK_PHY1:
717 case TG3_APE_LOCK_PHY2:
718 case TG3_APE_LOCK_PHY3:
719 bit = APE_LOCK_REQ_DRIVER;
725 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
726 req = TG3_APE_LOCK_REQ;
727 gnt = TG3_APE_LOCK_GRANT;
729 req = TG3_APE_PER_LOCK_REQ;
730 gnt = TG3_APE_PER_LOCK_GRANT;
735 tg3_ape_write32(tp, req + off, bit);
737 /* Wait for up to 1 millisecond to acquire lock. */
738 for (i = 0; i < 100; i++) {
739 status = tg3_ape_read32(tp, gnt + off);
742 if (pci_channel_offline(tp->pdev))
749 /* Revoke the lock request. */
750 tg3_ape_write32(tp, gnt + off, bit);
757 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
761 if (!tg3_flag(tp, ENABLE_APE))
765 case TG3_APE_LOCK_GPIO:
766 if (tg3_asic_rev(tp) == ASIC_REV_5761)
769 case TG3_APE_LOCK_GRC:
770 case TG3_APE_LOCK_MEM:
772 bit = APE_LOCK_GRANT_DRIVER;
774 bit = 1 << tp->pci_fn;
776 case TG3_APE_LOCK_PHY0:
777 case TG3_APE_LOCK_PHY1:
778 case TG3_APE_LOCK_PHY2:
779 case TG3_APE_LOCK_PHY3:
780 bit = APE_LOCK_GRANT_DRIVER;
786 if (tg3_asic_rev(tp) == ASIC_REV_5761)
787 gnt = TG3_APE_LOCK_GRANT;
789 gnt = TG3_APE_PER_LOCK_GRANT;
791 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
794 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
802 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
803 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
806 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
809 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
812 return timeout_us ? 0 : -EBUSY;
815 #ifdef CONFIG_TIGON3_HWMON
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
897 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
902 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
903 if (apedata != APE_SEG_SIG_MAGIC)
906 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
907 if (!(apedata & APE_FW_STATUS_READY))
910 /* Wait for up to 20 millisecond for APE to service previous event. */
911 err = tg3_ape_event_lock(tp, 20000);
915 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
916 event | APE_EVENT_STATUS_EVENT_PENDING);
918 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
919 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
924 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
929 if (!tg3_flag(tp, ENABLE_APE))
933 case RESET_KIND_INIT:
934 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
936 APE_HOST_SEG_SIG_MAGIC);
937 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
938 APE_HOST_SEG_LEN_MAGIC);
939 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
940 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
941 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
942 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
943 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
944 APE_HOST_BEHAV_NO_PHYLOCK);
945 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
946 TG3_APE_HOST_DRVR_STATE_START);
948 event = APE_EVENT_STATUS_STATE_START;
950 case RESET_KIND_SHUTDOWN:
951 if (device_may_wakeup(&tp->pdev->dev) &&
952 tg3_flag(tp, WOL_ENABLE)) {
953 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
954 TG3_APE_HOST_WOL_SPEED_AUTO);
955 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
957 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
959 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
961 event = APE_EVENT_STATUS_STATE_UNLOAD;
967 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
969 tg3_ape_send_event(tp, event);
972 static void tg3_send_ape_heartbeat(struct tg3 *tp,
973 unsigned long interval)
975 /* Check if hb interval has exceeded */
976 if (!tg3_flag(tp, ENABLE_APE) ||
977 time_before(jiffies, tp->ape_hb_jiffies + interval))
980 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
981 tp->ape_hb_jiffies = jiffies;
984 static void tg3_disable_ints(struct tg3 *tp)
988 tw32(TG3PCI_MISC_HOST_CTRL,
989 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
990 for (i = 0; i < tp->irq_max; i++)
991 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
994 static void tg3_enable_ints(struct tg3 *tp)
1001 tw32(TG3PCI_MISC_HOST_CTRL,
1002 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1004 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1005 for (i = 0; i < tp->irq_cnt; i++) {
1006 struct tg3_napi *tnapi = &tp->napi[i];
1008 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1009 if (tg3_flag(tp, 1SHOT_MSI))
1010 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1012 tp->coal_now |= tnapi->coal_now;
1015 /* Force an initial interrupt */
1016 if (!tg3_flag(tp, TAGGED_STATUS) &&
1017 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1018 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1020 tw32(HOSTCC_MODE, tp->coal_now);
1022 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1025 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1027 struct tg3 *tp = tnapi->tp;
1028 struct tg3_hw_status *sblk = tnapi->hw_status;
1029 unsigned int work_exists = 0;
1031 /* check for phy events */
1032 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1033 if (sblk->status & SD_STATUS_LINK_CHG)
1037 /* check for TX work to do */
1038 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1041 /* check for RX work to do */
1042 if (tnapi->rx_rcb_prod_idx &&
1043 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1050 * similar to tg3_enable_ints, but it accurately determines whether there
1051 * is new work pending and can return without flushing the PIO write
1052 * which reenables interrupts
1054 static void tg3_int_reenable(struct tg3_napi *tnapi)
1056 struct tg3 *tp = tnapi->tp;
1058 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1060 /* When doing tagged status, this work check is unnecessary.
1061 * The last_tag we write above tells the chip which piece of
1062 * work we've completed.
1064 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1065 tw32(HOSTCC_MODE, tp->coalesce_mode |
1066 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the chip's core clock source via TG3PCI_CLOCK_CTRL.
 * No-op on CPMU-equipped parts and the 5780 class (clocking handled
 * elsewhere on those).  For 5705+ parts the 625 MHz / 44 MHz core clock
 * selection is staged with delayed writes (tw32_wait_f).
 */
1069 static void tg3_switch_clocks(struct tg3 *tp)
1072 u32 orig_clock_ctrl;
1074 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1077 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1079 orig_clock_ctrl = clock_ctrl;
/* Preserve only the CLKRUN-related bits; everything else is rebuilt. */
1080 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1081 CLOCK_CTRL_CLKRUN_OENABLE |
1083 tp->pci_clock_ctrl = clock_ctrl;
1085 if (tg3_flag(tp, 5705_PLUS)) {
1086 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1088 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1090 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
/* Drop the 44 MHz core clock in two steps: first with ALTCLK
 * asserted, then with ALTCLK alone, per the staged sequence. */
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1096 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1099 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1102 #define PHY_BUSY_LOOPS 5000
/* Read a MII register from the PHY at @phy_addr via the MAC's MI_COM
 * interface.  Auto-polling is temporarily disabled around the access
 * and the APE PHY lock is held.  The result is stored in *@val.
 * NOTE(review): loop-exit bookkeeping and the final return value are
 * outside this excerpt.
 */
1104 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
/* MI auto-poll would race with a manual MI_COM transaction; turn it off. */
1111 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1113 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1117 tg3_ape_lock(tp, tp->phy_ape_lock);
/* Build the MI_COM command word: PHY address, register, READ+START. */
1121 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1122 MI_COM_PHY_ADDR_MASK);
1123 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1124 MI_COM_REG_ADDR_MASK);
1125 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1127 tw32_f(MAC_MI_COM, frame_val);
/* Poll for transaction completion (MI_COM_BUSY cleared). */
1129 loops = PHY_BUSY_LOOPS;
1130 while (loops != 0) {
1132 frame_val = tr32(MAC_MI_COM);
1134 if ((frame_val & MI_COM_BUSY) == 0) {
/* Re-read once more after BUSY clears to latch stable data. */
1136 frame_val = tr32(MAC_MI_COM);
1144 *val = frame_val & MI_COM_DATA_MASK;
/* Restore auto-polling if it was enabled on entry. */
1148 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1149 tw32_f(MAC_MI_MODE, tp->mi_mode);
1153 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: read @reg from the device's own PHY address. */
1158 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1160 return __tg3_readphy(tp, tp->phy_addr, reg, val);
/* Write @val to MII register @reg of the PHY at @phy_addr via MI_COM.
 * FET-class PHYs reject writes to MII_CTRL1000/MII_TG3_AUX_CTRL, so
 * those are short-circuited.  Auto-polling is suspended and the APE
 * PHY lock held for the duration, mirroring __tg3_readphy().
 */
1163 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
/* Registers not implemented on FET PHYs — skip the access entirely. */
1170 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1171 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1174 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1176 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1180 tg3_ape_lock(tp, tp->phy_ape_lock);
/* Build the MI_COM command word: address, register, data, WRITE+START. */
1182 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1183 MI_COM_PHY_ADDR_MASK);
1184 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1185 MI_COM_REG_ADDR_MASK);
1186 frame_val |= (val & MI_COM_DATA_MASK);
1187 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1189 tw32_f(MAC_MI_COM, frame_val);
/* Poll for completion. */
1191 loops = PHY_BUSY_LOOPS;
1192 while (loops != 0) {
1194 frame_val = tr32(MAC_MI_COM);
1195 if ((frame_val & MI_COM_BUSY) == 0) {
1197 frame_val = tr32(MAC_MI_COM);
/* Restore auto-polling if it was enabled on entry. */
1207 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1208 tw32_f(MAC_MI_MODE, tp->mi_mode);
1212 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: write @val to @reg of the device's own PHY. */
1217 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1219 return __tg3_writephy(tp, tp->phy_addr, reg, val);
/* Write a Clause-45 register through the Clause-22 indirect MMD access
 * registers: select @devad, latch @addr, switch to no-post-increment
 * data mode, then write @val.  Returns the first failing write's error.
 */
1222 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1226 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1230 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1234 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1235 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1239 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read a Clause-45 register via the indirect MMD mechanism; mirror of
 * tg3_phy_cl45_write() with a final read into *@val.
 */
1245 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1249 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1253 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1257 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1262 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read DSP register @reg: select it via DSP_ADDRESS, then read the
 * data port into *@val.
 */
1268 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1272 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1274 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
/* Write @val to DSP register @reg via the DSP address/data port pair. */
1279 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1283 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1285 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Read an AUX_CTRL shadow register: program the read-select field in
 * the MISC shadow, then read AUX_CTRL back into *@val.
 */
1290 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1294 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1295 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1296 MII_TG3_AUXCTL_SHDWSEL_MISC);
1298 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
/* Write @set to AUX_CTRL shadow register @reg.  The MISC shadow
 * additionally requires the write-enable bit to take effect.
 */
1303 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1305 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1306 set |= MII_TG3_AUXCTL_MISC_WREN;
1308 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
/* Enable or disable SM_DSP clock access via the AUXCTL shadow using a
 * read-modify-write; the TX_6DB bit is asserted on the write-back.
 */
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1316 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1322 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1324 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1326 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1327 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
/* Write @val to MISC shadow register @reg, asserting write-enable. */
1332 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1334 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1335 reg | val | MII_TG3_MISC_SHDW_WREN);
/* Software-reset the PHY by setting BMCR_RESET and polling BMCR until
 * the self-clearing reset bit drops.  NOTE(review): the timeout loop
 * bounds and return value are outside this excerpt.
 */
1338 static int tg3_bmcr_reset(struct tg3 *tp)
1343 /* OK, reset it, and poll the BMCR_RESET bit until it
1344 * clears or we time out.
 */
1346 phy_control = BMCR_RESET;
1347 err = tg3_writephy(tp, MII_BMCR, phy_control);
1353 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1357 if ((phy_control & BMCR_RESET) == 0) {
/* mii_bus ->read callback: serialize PHY access with tp->lock and
 * forward to __tg3_readphy().
 */
1369 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1371 struct tg3 *tp = bp->priv;
1374 spin_lock_bh(&tp->lock);
1376 if (__tg3_readphy(tp, mii_id, reg, &val))
1379 spin_unlock_bh(&tp->lock);
/* mii_bus ->write callback: serialize with tp->lock and forward to
 * __tg3_writephy().
 */
1384 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1386 struct tg3 *tp = bp->priv;
1389 spin_lock_bh(&tp->lock);
1391 if (__tg3_writephy(tp, mii_id, reg, val))
1394 spin_unlock_bh(&tp->lock);
/* Program the 5785 MAC's PHY-interface configuration (LED modes, RGMII
 * in-band signalling, clock timeouts) to match the attached PHY model
 * and interface mode.  Non-RGMII PHYs take the short path; RGMII PHYs
 * additionally get MAC_PHYCFG1/MAC_EXT_RGMII_MODE set up according to
 * the RGMII_* feature flags.
 */
1399 static void tg3_mdio_config_5785(struct tg3 *tp)
1402 struct phy_device *phydev;
1404 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
/* Pick per-PHY LED mode bits for MAC_PHYCFG2. */
1405 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1406 case PHY_ID_BCM50610:
1407 case PHY_ID_BCM50610M:
1408 val = MAC_PHYCFG2_50610_LED_MODES;
1410 case PHY_ID_BCMAC131:
1411 val = MAC_PHYCFG2_AC131_LED_MODES;
1413 case PHY_ID_RTL8211C:
1414 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1416 case PHY_ID_RTL8201E:
1417 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
/* Non-RGMII interface: write LED modes and default clock timeouts. */
1423 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1424 tw32(MAC_PHYCFG2, val);
1426 val = tr32(MAC_PHYCFG1);
1427 val &= ~(MAC_PHYCFG1_RGMII_INT |
1428 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1429 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1430 tw32(MAC_PHYCFG1, val);
/* RGMII with in-band status enabled: mask/enable bits in PHYCFG2. */
1435 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1436 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1437 MAC_PHYCFG2_FMODE_MASK_MASK |
1438 MAC_PHYCFG2_GMODE_MASK_MASK |
1439 MAC_PHYCFG2_ACT_MASK_MASK |
1440 MAC_PHYCFG2_QUAL_MASK_MASK |
1441 MAC_PHYCFG2_INBAND_ENABLE;
1443 tw32(MAC_PHYCFG2, val);
1445 val = tr32(MAC_PHYCFG1);
1446 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1447 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1448 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1449 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1450 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1451 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1452 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1454 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1455 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1456 tw32(MAC_PHYCFG1, val);
/* Rebuild the extended RGMII mode register from the feature flags. */
1458 val = tr32(MAC_EXT_RGMII_MODE);
1459 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1460 MAC_RGMII_MODE_RX_QUALITY |
1461 MAC_RGMII_MODE_RX_ACTIVITY |
1462 MAC_RGMII_MODE_RX_ENG_DET |
1463 MAC_RGMII_MODE_TX_ENABLE |
1464 MAC_RGMII_MODE_TX_LOWPWR |
1465 MAC_RGMII_MODE_TX_RESET);
1466 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1467 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1468 val |= MAC_RGMII_MODE_RX_INT_B |
1469 MAC_RGMII_MODE_RX_QUALITY |
1470 MAC_RGMII_MODE_RX_ACTIVITY |
1471 MAC_RGMII_MODE_RX_ENG_DET;
1472 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1473 val |= MAC_RGMII_MODE_TX_ENABLE |
1474 MAC_RGMII_MODE_TX_LOWPWR |
1475 MAC_RGMII_MODE_TX_RESET;
1477 tw32(MAC_EXT_RGMII_MODE, val);
/* Disable MI auto-polling (manual MDIO access takes over) and, on 5785
 * parts with an initialized MDIO bus, reapply the PHY-interface config.
 */
1480 static void tg3_mdio_start(struct tg3 *tp)
1482 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1483 tw32_f(MAC_MI_MODE, tp->mi_mode);
1486 if (tg3_flag(tp, MDIOBUS_INITED) &&
1487 tg3_asic_rev(tp) == ASIC_REV_5785)
1488 tg3_mdio_config_5785(tp);
/* Determine the PHY address for this device, and when phylib is in use,
 * allocate, populate and register the MDIO bus, then apply per-PHY-model
 * quirks (interface mode, dev_flags, FET marking).
 *
 * Fix: the "&reg" argument to tg3_readphy() had been corrupted into the
 * "registered sign" character by an HTML-entity mangling pass; restored.
 */
1491 static int tg3_mdio_init(struct tg3 *tp)
1495 struct phy_device *phydev;
/* 5717+ parts: PHY address derives from the PCI function number, and
 * serdes detection uses a chip-revision-dependent strap register. */
1497 if (tg3_flag(tp, 5717_PLUS)) {
1500 tp->phy_addr = tp->pci_fn + 1;
1502 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1503 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1505 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1506 TG3_CPMU_PHY_STRAP_IS_SERDES;
/* SSB cores behind a Robo switch: ask the SSB layer for the address. */
1509 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1512 addr = ssb_gige_get_phyaddr(tp->pdev);
1515 tp->phy_addr = addr;
1517 tp->phy_addr = TG3_PHY_MII_ADDR;
1521 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1524 tp->mdio_bus = mdiobus_alloc();
1525 if (tp->mdio_bus == NULL)
1528 tp->mdio_bus->name = "tg3 mdio bus";
1529 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1530 tp->mdio_bus->priv = tp;
1531 tp->mdio_bus->parent = &tp->pdev->dev;
1532 tp->mdio_bus->read = &tg3_mdio_read;
1533 tp->mdio_bus->write = &tg3_mdio_write;
/* Only scan our own PHY address. */
1534 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1536 /* The bus registration will look for all the PHYs on the mdio bus.
1537 * Unfortunately, it does not ensure the PHY is powered up before
1538 * accessing the PHY ID registers. A chip reset is the
1539 * quickest way to bring the device back to an operational state..
 */
1541 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1544 i = mdiobus_register(tp->mdio_bus);
1546 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1547 mdiobus_free(tp->mdio_bus);
1551 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1553 if (!phydev || !phydev->drv) {
1554 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1555 mdiobus_unregister(tp->mdio_bus);
1556 mdiobus_free(tp->mdio_bus);
/* Per-PHY-model quirks. */
1560 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1561 case PHY_ID_BCM57780:
1562 phydev->interface = PHY_INTERFACE_MODE_GMII;
1563 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565 case PHY_ID_BCM50610:
1566 case PHY_ID_BCM50610M:
1567 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1568 PHY_BRCM_RX_REFCLK_UNUSED |
1569 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1570 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1572 case PHY_ID_RTL8211C:
1573 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 case PHY_ID_RTL8201E:
1576 case PHY_ID_BCMAC131:
1577 phydev->interface = PHY_INTERFACE_MODE_MII;
1578 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1583 tg3_flag_set(tp, MDIOBUS_INITED);
1585 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586 tg3_mdio_config_5785(tp);
/* Tear down the MDIO bus registered by tg3_mdio_init(), clearing the
 * MDIOBUS_INITED flag first so teardown is not re-entered.
 */
1591 static void tg3_mdio_fini(struct tg3 *tp)
1593 if (tg3_flag(tp, MDIOBUS_INITED)) {
1594 tg3_flag_clear(tp, MDIOBUS_INITED);
1595 mdiobus_unregister(tp->mdio_bus);
1596 mdiobus_free(tp->mdio_bus);
1600 /* tp->lock is held. */
/* Raise a driver event toward the RX CPU firmware and record when it
 * was raised so tg3_wait_for_event_ack() can bound its wait.
 */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1605 val = tr32(GRC_RX_CPU_EVENT);
1606 val |= GRC_RX_CPU_DRIVER_EVENT;
1607 tw32_f(GRC_RX_CPU_EVENT, val);
1609 tp->last_event_jiffies = jiffies;
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to ack
 * the previously generated driver event, i.e. for the DRIVER_EVENT bit
 * to clear.  The wait is shortened by however much time has already
 * elapsed since the event was raised, and aborts if the PCI channel
 * goes offline.
 */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1618 unsigned int delay_cnt;
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain = (long)(tp->last_event_jiffies + 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 if (time_remain < 0)
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt = jiffies_to_usecs(time_remain);
1630 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
/* Poll in ~8 usec steps (delay body not visible in this excerpt). */
1632 delay_cnt = (delay_cnt >> 3) + 1;
1634 for (i = 0; i < delay_cnt; i++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 if (pci_channel_offline(tp->pdev))
1644 /* tp->lock is held. */
/* Collect PHY register state (BMCR/BMSR, advertisement/link-partner,
 * 1000T control/status for non-MII-serdes, and PHYADDR) into the
 * four-word @data buffer sent to management firmware on link changes.
 *
 * Fix: every "&reg" argument to tg3_readphy() had been corrupted into
 * the "registered sign" character by an HTML-entity mangling pass;
 * restored throughout.
 */
1645 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
/* Word 0: BMCR in the high half, BMSR in the low half. */
1650 if (!tg3_readphy(tp, MII_BMCR, &reg))
1652 if (!tg3_readphy(tp, MII_BMSR, &reg))
1653 val |= (reg & 0xffff);
/* Word 1: local advertisement and link-partner ability. */
1657 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1659 if (!tg3_readphy(tp, MII_LPA, &reg))
1660 val |= (reg & 0xffff);
/* Word 2: 1000BASE-T control/status; skipped on MII-serdes PHYs. */
1664 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1665 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1667 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1668 val |= (reg & 0xffff);
/* Word 3: PHY address register. */
1672 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1679 /* tp->lock is held. */
/* Report a link state change to management firmware (5780-class with
 * ASF only): gather PHY state, wait for the previous event to be acked,
 * write the command + 14-byte payload into NIC SRAM mailboxes, and
 * raise the driver event.
 */
1680 static void tg3_ump_link_report(struct tg3 *tp)
1684 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1687 tg3_phy_gather_ump_data(tp, data);
1689 tg3_wait_for_event_ack(tp);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1695 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698 tg3_generate_fw_event(tp);
1701 /* tp->lock is held. */
/* Ask ASF firmware to pause (only when ASF is enabled and there is no
 * APE): wait out any in-flight event, post FWCMD_NICDRV_PAUSE_FW, raise
 * the event, and wait for its acknowledgement.
 */
1702 static void tg3_stop_fw(struct tg3 *tp)
1704 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1705 /* Wait for RX cpu to ACK the previous event. */
1706 tg3_wait_for_event_ack(tp);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710 tg3_generate_fw_event(tp);
1712 /* Wait for RX cpu to ACK this event. */
1713 tg3_wait_for_event_ack(tp);
1717 /* tp->lock is held. */
/* Pre-reset firmware handshake: write the firmware mailbox magic, then
 * (new-style ASF handshake only) record the pending reset @kind in the
 * driver-state mailbox.
 */
1718 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1721 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725 case RESET_KIND_INIT:
1726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 case RESET_KIND_SHUTDOWN:
1731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 case RESET_KIND_SUSPEND:
1736 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 /* tp->lock is held. */
/* Post-reset firmware handshake (new-style ASF only): report that the
 * start/unload sequence for @kind has completed.
 */
1747 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 case RESET_KIND_INIT:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_START_DONE);
1756 case RESET_KIND_SHUTDOWN:
1757 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758 DRV_STATE_UNLOAD_DONE);
1767 /* tp->lock is held. */
/* Legacy ASF handshake: record the reset @kind in the driver-state
 * mailbox when ASF is enabled (no magic-mailbox write, unlike the
 * pre-reset variant).
 */
1768 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 if (tg3_flag(tp, ENABLE_ASF)) {
1772 case RESET_KIND_INIT:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 case RESET_KIND_SHUTDOWN:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SUSPEND:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Poll for firmware boot completion after a reset.  Skipped when no
 * firmware was ever detected or on SSB cores (no firmware at all).
 * 5906 polls VCPU_STATUS for init-done; other chips poll the firmware
 * mailbox for the inverted magic value.  A timeout is NOT an error —
 * some Sun onboard parts legitimately run without firmware — but it is
 * logged once via the NO_FWARE_REPORTED flag.
 */
1793 static int tg3_poll_fw(struct tg3 *tp)
1798 if (tg3_flag(tp, NO_FWARE_REPORTED))
1801 if (tg3_flag(tp, IS_SSB_CORE)) {
1802 /* We don't use firmware. */
1806 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1807 /* Wait up to 20ms for init done. */
1808 for (i = 0; i < 200; i++) {
1809 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 if (pci_channel_offline(tp->pdev))
1819 /* Wait for firmware initialization to complete. */
1820 for (i = 0; i < 100000; i++) {
1821 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Firmware writes back the bitwise-inverted magic when booted. */
1822 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1824 if (pci_channel_offline(tp->pdev)) {
1825 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1826 tg3_flag_set(tp, NO_FWARE_REPORTED);
1827 netdev_info(tp->dev, "No firmware running\n");
1836 /* Chip might not be fitted with firmware. Some Sun onboard
1837 * parts are configured like that. So don't signal the timeout
1838 * of the above loop as an error, but do report the lack of
1839 * running firmware once.
 */
1841 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1842 tg3_flag_set(tp, NO_FWARE_REPORTED);
1844 netdev_info(tp->dev, "No firmware running\n");
1847 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1848 /* The 57765 A0 needs a little more
1849 * time to do some important work.
 */
/* Log the current link state (down, or up with speed/duplex and flow
 * control), report EEE state when the PHY supports it, forward the
 * event to management firmware, and cache carrier state in tp->link_up.
 */
1857 static void tg3_link_report(struct tg3 *tp)
1859 if (!netif_carrier_ok(tp->dev)) {
1860 netif_info(tp, link, tp->dev, "Link is down\n");
1861 tg3_ump_link_report(tp);
1862 } else if (netif_msg_link(tp)) {
1863 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1864 (tp->link_config.active_speed == SPEED_1000 ?
1866 (tp->link_config.active_speed == SPEED_100 ?
1868 (tp->link_config.active_duplex == DUPLEX_FULL ?
1871 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1872 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1877 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1878 netdev_info(tp->dev, "EEE is %s\n",
1879 tp->setlpicnt ? "enabled" : "disabled");
1881 tg3_ump_link_report(tp);
1884 tp->link_up = netif_carrier_ok(tp->dev);
/* Decode 1000BASE-T pause advertisement bits into FLOW_CTRL_{RX,TX}:
 * symmetric pause enables both directions; asymmetric-only enables TX.
 */
1887 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1891 if (adv & ADVERTISE_PAUSE_CAP) {
1892 flowctrl |= FLOW_CTRL_RX;
1893 if (!(adv & ADVERTISE_PAUSE_ASYM))
1894 flowctrl |= FLOW_CTRL_TX;
1895 } else if (adv & ADVERTISE_PAUSE_ASYM)
1896 flowctrl |= FLOW_CTRL_TX;
/* Map FLOW_CTRL_{TX,RX} into the 1000BASE-X pause advertisement bits
 * (inverse of tg3_decode_flowctrl_1000X).
 */
1901 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1905 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1906 miireg = ADVERTISE_1000XPAUSE;
1907 else if (flow_ctrl & FLOW_CTRL_TX)
1908 miireg = ADVERTISE_1000XPSE_ASYM;
1909 else if (flow_ctrl & FLOW_CTRL_RX)
1910 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Decode 1000BASE-X pause advertisement bits into FLOW_CTRL_{RX,TX};
 * same scheme as tg3_decode_flowctrl_1000T but with the 1000X bits.
 */
1917 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1921 if (adv & ADVERTISE_1000XPAUSE) {
1922 flowctrl |= FLOW_CTRL_RX;
1923 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1924 flowctrl |= FLOW_CTRL_TX;
1925 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1926 flowctrl |= FLOW_CTRL_TX;
/* Resolve negotiated 1000BASE-X flow control from local and remote
 * pause advertisements: both symmetric -> TX+RX; both asymmetric ->
 * direction chosen by which side also advertised symmetric pause.
 */
1931 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1935 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1936 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1937 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1938 if (lcladv & ADVERTISE_1000XPAUSE)
1940 if (rmtadv & ADVERTISE_1000XPAUSE)
/* Apply the negotiated (or forced) flow-control settings to the MAC:
 * resolve from advertisements when autoneg + pause-autoneg are active
 * (serdes vs copper resolution differ), otherwise use the configured
 * value; then update RX_MODE/TX_MODE only when the bits change.
 */
1947 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1951 u32 old_rx_mode = tp->rx_mode;
1952 u32 old_tx_mode = tp->tx_mode;
1954 if (tg3_flag(tp, USE_PHYLIB))
1955 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1957 autoneg = tp->link_config.autoneg;
1959 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1960 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1961 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965 flowctrl = tp->link_config.flowctrl;
1967 tp->link_config.active_flowctrl = flowctrl;
1969 if (flowctrl & FLOW_CTRL_RX)
1970 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
/* Avoid redundant register writes when nothing changed. */
1974 if (old_rx_mode != tp->rx_mode)
1975 tw32_f(MAC_RX_MODE, tp->rx_mode);
1977 if (flowctrl & FLOW_CTRL_TX)
1978 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982 if (old_tx_mode != tp->tx_mode)
1983 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback.  Under tp->lock, rebuild MAC_MODE from
 * the PHY's reported speed/duplex, re-resolve flow control, adjust the
 * MI status and TX length/IPG registers, and cache the new link state.
 * tg3_link_report() is called outside the lock when anything changed.
 */
1986 static void tg3_adjust_link(struct net_device *dev)
1988 u8 oldflowctrl, linkmesg = 0;
1989 u32 mac_mode, lcl_adv, rmt_adv;
1990 struct tg3 *tp = netdev_priv(dev);
1991 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1993 spin_lock_bh(&tp->lock);
1995 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1996 MAC_MODE_HALF_DUPLEX);
1998 oldflowctrl = tp->link_config.active_flowctrl;
/* Select MII vs GMII port mode based on the PHY-reported speed;
 * on non-5785 parts GMII is also used when speed is unknown. */
2004 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2005 mac_mode |= MAC_MODE_PORT_MODE_MII;
2006 else if (phydev->speed == SPEED_1000 ||
2007 tg3_asic_rev(tp) != ASIC_REV_5785)
2008 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2010 mac_mode |= MAC_MODE_PORT_MODE_MII;
2012 if (phydev->duplex == DUPLEX_HALF)
2013 mac_mode |= MAC_MODE_HALF_DUPLEX;
2015 lcl_adv = mii_advertise_flowctrl(
2016 tp->link_config.flowctrl);
2019 rmt_adv = LPA_PAUSE_CAP;
2020 if (phydev->asym_pause)
2021 rmt_adv |= LPA_PAUSE_ASYM;
2024 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2026 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 if (mac_mode != tp->mac_mode) {
2029 tp->mac_mode = mac_mode;
2030 tw32_f(MAC_MODE, tp->mac_mode);
/* 5785: MI status register needs a 10 Mbps mode hint. */
2034 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2035 if (phydev->speed == SPEED_10)
2037 MAC_MI_STAT_10MBPS_MODE |
2038 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2040 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* Half-duplex gigabit uses an extended slot time (0xff vs 32). */
2043 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2044 tw32(MAC_TX_LENGTHS,
2045 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2046 (6 << TX_LENGTHS_IPG_SHIFT) |
2047 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2049 tw32(MAC_TX_LENGTHS,
2050 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2051 (6 << TX_LENGTHS_IPG_SHIFT) |
2052 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2054 if (phydev->link != tp->old_link ||
2055 phydev->speed != tp->link_config.active_speed ||
2056 phydev->duplex != tp->link_config.active_duplex ||
2057 oldflowctrl != tp->link_config.active_flowctrl)
2060 tp->old_link = phydev->link;
2061 tp->link_config.active_speed = phydev->speed;
2062 tp->link_config.active_duplex = phydev->duplex;
2064 spin_unlock_bh(&tp->lock);
2067 tg3_link_report(tp);
/* Connect the MAC to its PHY via phylib: attach tg3_adjust_link() as
 * the link-change handler, then constrain the PHY's advertised speed
 * and pause support according to interface mode and the 10/100-only
 * flag.  Idempotent: returns early if already connected.
 */
2070 static int tg3_phy_init(struct tg3 *tp)
2072 struct phy_device *phydev;
2074 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2077 /* Bring the PHY back to a known state. */
2080 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2082 /* Attach the MAC to the PHY. */
2083 phydev = phy_connect(tp->dev, phydev_name(phydev),
2084 tg3_adjust_link, phydev->interface);
2085 if (IS_ERR(phydev)) {
2086 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2087 return PTR_ERR(phydev);
2090 /* Mask with MAC supported features. */
2091 switch (phydev->interface) {
2092 case PHY_INTERFACE_MODE_GMII:
2093 case PHY_INTERFACE_MODE_RGMII:
2094 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2095 phy_set_max_speed(phydev, SPEED_1000);
2096 phy_support_asym_pause(phydev);
2100 case PHY_INTERFACE_MODE_MII:
2101 phy_set_max_speed(phydev, SPEED_100);
2102 phy_support_asym_pause(phydev);
/* Unsupported interface mode: disconnect and bail out. */
2105 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2109 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2111 phy_attached_info(phydev);
/* (Re)start autonegotiation on the connected PHY.  When resuming from
 * low-power state, first restore the saved speed/duplex/autoneg and
 * advertising configuration onto the phy_device.
 */
2116 static void tg3_phy_start(struct tg3 *tp)
2118 struct phy_device *phydev;
2120 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2123 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2125 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2126 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2127 phydev->speed = tp->link_config.speed;
2128 phydev->duplex = tp->link_config.duplex;
2129 phydev->autoneg = tp->link_config.autoneg;
2130 ethtool_convert_legacy_u32_to_link_mode(
2131 phydev->advertising, tp->link_config.advertising);
2136 phy_start_aneg(phydev);
/* Stop the PHY state machine; no-op if the PHY was never connected. */
2139 static void tg3_phy_stop(struct tg3 *tp)
2141 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
/* Disconnect from the PHY (undo tg3_phy_init) and clear the connected
 * flag so a later tg3_phy_init() can reconnect.
 */
2147 static void tg3_phy_fini(struct tg3 *tp)
2149 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2150 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2151 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable external loopback via the AUXCTL shadow register.  Not
 * applicable to FET PHYs.  The BCM5401 cannot do a read-modify-write
 * on AUXCTL, so it gets a direct write instead.
 */
2155 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2160 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2164 /* Cannot do read-modify-write on 5401 */
2165 err = tg3_phy_auxctl_write(tp,
2166 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2167 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2172 err = tg3_phy_auxctl_read(tp,
2173 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2177 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
/* Toggle auto power-down on a FET-class PHY through its shadow-register
 * window: open the window via FET_TEST, flip the APD bit in
 * SHDW_AUXSTAT2, then restore FET_TEST to close the window.
 */
2185 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2189 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192 tg3_writephy(tp, MII_TG3_FET_TEST,
2193 phytest | MII_TG3_FET_SHADOW_EN);
2194 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2196 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2198 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2201 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Enable/disable auto power-down.  Only on 5705+ (and not MII-serdes
 * 5717+); FET PHYs delegate to tg3_phy_fet_toggle_apd().  Otherwise
 * program the SCR5 and APD MISC-shadow selectors, with the DLL-APD bit
 * withheld on 5784 when enabling.
 */
2205 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2209 if (!tg3_flag(tp, 5705_PLUS) ||
2210 (tg3_flag(tp, 5717_PLUS) &&
2211 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2215 tg3_phy_fet_toggle_apd(tp, enable);
2219 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2220 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2221 MII_TG3_MISC_SHDW_SCR5_SDTL |
2222 MII_TG3_MISC_SHDW_SCR5_C125OE;
2223 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2224 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2226 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
/* 84 ms wake timer; APD enable bit added when enabling. */
2229 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2231 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2233 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
/* Enable/disable automatic MDI crossover.  Only for 5705+ copper PHYs.
 * FET PHYs flip the MDIX bit in their MISCCTRL shadow register (via
 * the FET_TEST shadow window); others use the AUXCTL MISC shadow's
 * FORCE_AMDIX bit.
 */
2236 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2240 if (!tg3_flag(tp, 5705_PLUS) ||
2241 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2244 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2247 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2248 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2250 tg3_writephy(tp, MII_TG3_FET_TEST,
2251 ephy | MII_TG3_FET_SHADOW_EN);
2252 if (!tg3_readphy(tp, reg, &phy)) {
2254 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2256 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2257 tg3_writephy(tp, reg, phy);
2259 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2264 ret = tg3_phy_auxctl_read(tp,
2265 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2268 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2270 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2271 tg3_phy_auxctl_write(tp,
2272 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Enable Ethernet@Wirespeed (downshift) in the AUXCTL MISC shadow,
 * unless the PHY is flagged as not supporting it.
 */
2277 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2282 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2285 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2287 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2288 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Program PHY DSP tuning registers from the chip's OTP word: each OTP
 * field is extracted with its mask/shift and written to the matching
 * DSP register, bracketed by SM_DSP enable/disable.
 * NOTE(review): the otp variable's initialization is outside this
 * excerpt — presumably tp->phy_otp; confirm against the full source.
 */
2291 static void tg3_phy_apply_otp(struct tg3 *tp)
2300 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2303 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2304 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2305 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2307 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2308 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2309 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2311 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2312 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2313 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2315 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2316 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2318 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2319 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2321 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2322 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2323 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2325 tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Populate tp->eee from hardware: EEE resolution status and link-
 * partner/local advertisement via Clause-45 reads, plus LPI enable and
 * timer from CPMU registers.  NOTE(review): the @eee parameter is not
 * dereferenced in the visible lines — results go to &tp->eee; confirm
 * its role against the full source.
 */
2328 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2331 struct ethtool_eee *dest = &tp->eee;
2333 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2339 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2342 /* Pull eee_active */
2343 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2344 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2345 dest->eee_active = 1;
2347 dest->eee_active = 0;
2349 /* Pull lp advertised settings */
2350 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2352 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2354 /* Pull advertised and eee_enabled settings */
2355 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2357 dest->eee_enabled = !!val;
2358 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2360 /* Pull tx_lpi_enabled */
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2364 /* Pull lpi timer value */
2365 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
/* Reconcile EEE state with the current link: on an autonegotiated
 * full-duplex 100/1000 link, program the LPI exit latency and refresh
 * tp->eee from hardware; when EEE did not become active, clear the
 * TAP26 DSP register and drop the CPMU LPI-enable bit.
 */
2368 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2372 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2377 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2379 tp->link_config.active_duplex == DUPLEX_FULL &&
2380 (tp->link_config.active_speed == SPEED_100 ||
2381 tp->link_config.active_speed == SPEED_1000)) {
/* LPI exit latency depends on link speed. */
2384 if (tp->link_config.active_speed == SPEED_1000)
2385 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2387 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2389 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2391 tg3_eee_pull_config(tp, NULL);
2392 if (tp->eee.eee_active)
2396 if (!tp->setlpicnt) {
2397 if (current_link_up &&
2398 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2399 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2400 tg3_phy_toggle_auxctl_smdsp(tp, false);
2403 val = tr32(TG3_CPMU_EEE_MODE);
2404 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Enable EEE LPI generation: on gigabit links for 5717/5719/57765-class
 * parts, first set the ALNOKO/RMRXSTO workaround bits in DSP TAP26,
 * then set the CPMU LPI-enable bit.
 */
2408 static void tg3_phy_eee_enable(struct tg3 *tp)
2412 if (tp->link_config.active_speed == SPEED_1000 &&
2413 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2414 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2415 tg3_flag(tp, 57765_CLASS)) &&
2416 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417 val = MII_TG3_DSP_TAP26_ALNOKO |
2418 MII_TG3_DSP_TAP26_RMRXSTO;
2419 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2420 tg3_phy_toggle_auxctl_smdsp(tp, false);
2423 val = tr32(TG3_CPMU_EEE_MODE);
2424 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll the DSP_CONTROL register until its macro-busy bit (0x1000)
 * clears.  NOTE(review): loop bounds and return values are outside
 * this excerpt.
 */
2427 static int tg3_wait_macro_done(struct tg3 *tp)
2434 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2435 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels and
 * read it back.  On any busy-wait failure *@resetp is set so the
 * caller (tg3_phy_reset_5703_4_5) retries after a PHY reset; on a data
 * mismatch, recovery values are written to DSP register 0x000b.
 */
2445 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2447 static const u32 test_pat[4][6] = {
2448 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2449 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2450 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2451 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2455 for (chan = 0; chan < 4; chan++) {
/* Select the channel block (0x2000 stride) and enter write mode. */
2458 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2459 (chan * 0x2000) | 0x0200);
2460 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2462 for (i = 0; i < 6; i++)
2463 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2466 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2467 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and switch to read-back mode. */
2472 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 (chan * 0x2000) | 0x0200);
2474 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2475 if (tg3_wait_macro_done(tp)) {
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2481 if (tg3_wait_macro_done(tp)) {
/* Each pattern entry is a (low, high) word pair. */
2486 for (i = 0; i < 6; i += 2) {
2489 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2490 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2491 tg3_wait_macro_done(tp)) {
2497 if (low != test_pat[chan][i] ||
2498 high != test_pat[chan][i+1]) {
2499 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2500 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out all 6 words of each of the 4 DSP channels (clears the test
 * pattern written by tg3_phy_write_and_check_testpat).
 */
2511 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2515 for (chan = 0; chan < 4; chan++) {
2518 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2519 (chan * 0x2000) | 0x0200);
2520 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2521 for (i = 0; i < 6; i++)
2522 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2523 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2524 if (tg3_wait_macro_done(tp))
/* 5703/5704/5705-specific PHY reset workaround: force 1000FD master
 * mode, exercise the DSP channels with a test pattern (retrying with a
 * BMCR reset when needed), clear the pattern, then restore the saved
 * MII_CTRL1000 and MII_TG3_EXT_CTRL values.
 *
 * Fix: the "&reg32" arguments to tg3_readphy() had been corrupted into
 * the "registered sign" character by an HTML-entity mangling pass;
 * restored in both occurrences.
 */
2531 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2533 u32 reg32, phy9_orig;
2534 int retries, do_phy_reset, err;
2540 err = tg3_bmcr_reset(tp);
2546 /* Disable transmitter and interrupt. */
2547 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2551 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2553 /* Set full-duplex, 1000 mbps. */
2554 tg3_writephy(tp, MII_BMCR,
2555 BMCR_FULLDPLX | BMCR_SPEED1000);
2557 /* Set to master mode. */
2558 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2561 tg3_writephy(tp, MII_CTRL1000,
2562 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2564 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2568 /* Block the PHY control access. */
2569 tg3_phydsp_write(tp, 0x8005, 0x0800);
2571 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2574 } while (--retries);
2576 err = tg3_phy_reset_chanpat(tp);
/* Unblock PHY control access and leave the DSP address at a benign
 * register before restoring the original PHY configuration. */
2580 tg3_phydsp_write(tp, 0x8005, 0x0000);
2582 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2583 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2585 tg3_phy_toggle_auxctl_smdsp(tp, false);
2587 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2589 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2594 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Drop carrier on the netdev and mirror the state in tp->link_up. */
2599 static void tg3_carrier_off(struct tg3 *tp)
2601 netif_carrier_off(tp->dev);
2602 tp->link_up = false;
/* Warn (ASF-enabled devices only) that an upcoming PHY reconfiguration
 * will briefly interrupt management side-band traffic.
 */
2605 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2607 if (tg3_flag(tp, ENABLE_ASF))
2608 netdev_warn(tp->dev,
2609 "Management side-band traffic will be interrupted during phy settings change\n");
2612 /* This will reset the tigon3 PHY if there is no valid
2613 * link unless the FORCE argument is non-zero.
 *
 * NOTE(review): lossy sample - error-return checks and several closing
 * braces are elided between the visible lines of this function.
 */
2615 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: pull the internal ethernet PHY out of IDDQ (low-power) mode. */
2620 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2621 val = tr32(GRC_MISC_CFG);
2622 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* Double-read of BMSR: the first read returns latched status. */
2625 err = tg3_readphy(tp, MII_BMSR, &val);
2626 err |= tg3_readphy(tp, MII_BMSR, &val);
2630 if (netif_running(tp->dev) && tp->link_up) {
2631 netif_carrier_off(tp->dev);
2632 tg3_link_report(tp);
/* 5703/4/5 need the dedicated DSP-pattern reset workaround. */
2635 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2636 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2637 tg3_asic_rev(tp) == ASIC_REV_5705) {
2638 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX): temporarily clear the GPHY 10MB-RX-only CPMU bit
 * around the BMCR reset, then restore it.
 */
2645 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2646 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2647 cpmuctrl = tr32(TG3_CPMU_CTRL);
2648 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2650 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2653 err = tg3_bmcr_reset(tp);
2657 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2658 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2659 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2661 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5784-AX/5761-AX: undo the 12.5MHz MAC clock forced at power-down. */
2664 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2665 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2666 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2667 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2668 CPMU_LSPD_1000MB_MACCLK_12_5) {
2669 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2671 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
/* 5717+ serdes parts skip the copper-specific fixups below. */
2675 if (tg3_flag(tp, 5717_PLUS) &&
2676 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2679 tg3_phy_apply_otp(tp);
2681 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2682 tg3_phy_toggle_apd(tp, true);
2684 tg3_phy_toggle_apd(tp, false);
/* Per-PHY-erratum DSP pokes; each is guarded by its own phy_flags bit. */
2687 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2688 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2689 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2690 tg3_phydsp_write(tp, 0x000a, 0x0323);
2691 tg3_phy_toggle_auxctl_smdsp(tp, false);
2694 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2695 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2696 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2699 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2700 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 tg3_phydsp_write(tp, 0x000a, 0x310b);
2702 tg3_phydsp_write(tp, 0x201f, 0x9506);
2703 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2707 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2709 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2710 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2711 tg3_writephy(tp, MII_TG3_TEST1,
2712 MII_TG3_TEST1_TRIM_EN | 0x4);
2714 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2716 tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 /* Set Extended packet length bit (bit 14) on all chips that */
2721 /* support jumbo frames */
2722 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2723 /* Cannot do read-modify-write on 5401 */
2724 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2725 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2726 /* Set bit 14 with read-modify-write to preserve other bits */
2727 err = tg3_phy_auxctl_read(tp,
2728 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2730 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2731 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2734 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2735 * jumbo frames transmission.
 */
2737 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2739 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2740 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2743 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2744 /* adjust output voltage */
2745 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2748 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2749 tg3_phydsp_write(tp, 0xffb, 0x4000);
2751 tg3_phy_toggle_automdix(tp, true);
2752 tg3_phy_set_wirespeed(tp);
/* Per-function GPIO status message: each PCI function owns a 4-bit slot
 * (hence the <<0/<<4/<<8/<<12 masks) holding DRVR_PRES ("a driver is
 * present on this function") and NEED_VAUX ("this function needs the
 * auxiliary power source") bits.
 */
2756 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2757 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2758 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2759 TG3_GPIO_MSG_NEED_VAUX)
2760 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2761 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2762 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2763 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2764 (TG3_GPIO_MSG_DRVR_PRES << 12))
2766 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2767 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2768 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2769 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2770 (TG3_GPIO_MSG_NEED_VAUX << 12))
/* Publish this function's GPIO status nibble (DRVR_PRES/NEED_VAUX) into
 * the shared status word - APE scratchpad on 5717/5719, CPMU_DRV_STATUS
 * otherwise - and return the combined status of all functions.
 */
2772 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2776 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2777 tg3_asic_rev(tp) == ASIC_REV_5719)
2778 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2780 status = tr32(TG3_CPMU_DRV_STATUS);
/* Replace only our own 4-bit slot (selected by pci_fn). */
2782 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2783 status &= ~(TG3_GPIO_MSG_MASK << shift);
2784 status |= (newstat << shift);
2786 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2787 tg3_asic_rev(tp) == ASIC_REV_5719)
2788 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2790 tw32(TG3_CPMU_DRV_STATUS, status);
2792 return status >> TG3_APE_GPIO_MSG_SHIFT;
/* Switch the NIC's power source back to Vmain. On 5717/5719/5720 this is
 * coordinated across PCI functions via the APE GPIO lock and shared
 * status word; older parts just rewrite GRC_LOCAL_CTRL directly.
 */
2795 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2797 if (!tg3_flag(tp, IS_NIC))
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2802 tg3_asic_rev(tp) == ASIC_REV_5720) {
/* Serialize GPIO power switching against the other functions. */
2803 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2806 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2809 TG3_GRC_LCLCTL_PWRSW_DELAY);
2811 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Leave the device powered from Vmain at shutdown by sequencing GPIO1
 * (output high -> toggle -> high again), with the power-switch settle
 * delay between writes. No-op on non-NIC boards and 5700/5701.
 */
2820 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2824 if (!tg3_flag(tp, IS_NIC) ||
2825 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2826 tg3_asic_rev(tp) == ASIC_REV_5701)
2829 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2831 tw32_wait_f(GRC_LOCAL_CTRL,
2832 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 tw32_wait_f(GRC_LOCAL_CTRL,
2837 TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch the board to the auxiliary (Vaux) power source via GRC GPIO
 * sequencing. Three board families need different sequences:
 *   - 5700/5701: single combined write,
 *   - 5761 (non-E): GPIO0 and GPIO2 are swapped, three-step sequence,
 *   - everything else: generic sequence, honoring the no-GPIO2 strap
 *     and the 5714 over-current (GPIO3) workaround.
 * NOTE(review): lossy sample - a few continuation lines of the register
 * value expressions are elided.
 */
2844 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2846 if (!tg3_flag(tp, IS_NIC))
2849 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2850 tg3_asic_rev(tp) == ASIC_REV_5701) {
2851 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2852 (GRC_LCLCTRL_GPIO_OE0 |
2853 GRC_LCLCTRL_GPIO_OE1 |
2854 GRC_LCLCTRL_GPIO_OE2 |
2855 GRC_LCLCTRL_GPIO_OUTPUT0 |
2856 GRC_LCLCTRL_GPIO_OUTPUT1),
2857 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2860 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2861 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2862 GRC_LCLCTRL_GPIO_OE1 |
2863 GRC_LCLCTRL_GPIO_OE2 |
2864 GRC_LCLCTRL_GPIO_OUTPUT0 |
2865 GRC_LCLCTRL_GPIO_OUTPUT1 |
2867 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2868 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2872 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2875 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2876 TG3_GRC_LCLCTL_PWRSW_DELAY);
2879 u32 grc_local_ctrl = 0;
2881 /* Workaround to prevent overdrawing Amps. */
2882 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2884 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 /* On 5753 and variants, GPIO2 cannot be used. */
2890 no_gpio2 = tp->nic_sram_data_cfg &
2891 NIC_SRAM_DATA_CFG_NO_GPIO2;
2893 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2894 GRC_LCLCTRL_GPIO_OE1 |
2895 GRC_LCLCTRL_GPIO_OE2 |
2896 GRC_LCLCTRL_GPIO_OUTPUT1 |
2897 GRC_LCLCTRL_GPIO_OUTPUT2;
2899 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2900 GRC_LCLCTRL_GPIO_OUTPUT2);
2902 tw32_wait_f(GRC_LOCAL_CTRL,
2903 tp->grc_local_ctrl | grc_local_ctrl,
2904 TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2908 tw32_wait_f(GRC_LOCAL_CTRL,
2909 tp->grc_local_ctrl | grc_local_ctrl,
2910 TG3_GRC_LCLCTL_PWRSW_DELAY);
2913 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2914 tw32_wait_f(GRC_LOCAL_CTRL,
2915 tp->grc_local_ctrl | grc_local_ctrl,
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* 5717-class aux power arbitration: under the APE GPIO lock, publish
 * whether this function needs Vaux; only when no other function has a
 * driver present does the combined vote pick Vaux vs. Vmain.
 */
2921 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2925 /* Serialize power state transitions */
2926 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2929 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2930 msg = TG3_GPIO_MSG_NEED_VAUX;
2932 msg = tg3_set_function_status(tp, msg);
/* Another active driver will handle the power source itself. */
2934 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2937 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2938 tg3_pwrsrc_switch_to_vaux(tp);
2940 tg3_pwrsrc_die_with_vmain(tp);
2943 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide and apply the aux-power source for this NIC, considering WoL
 * (optionally), ASF, and - on dual-port boards - the peer function's
 * needs. 5717/5719/5720 delegate to the APE-arbitrated variant.
 */
2946 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2948 bool need_vaux = false;
2950 /* The GPIOs do something completely different on 57765. */
2951 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2954 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2955 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2956 tg3_asic_rev(tp) == ASIC_REV_5720) {
2957 tg3_frob_aux_power_5717(tp, include_wol ?
2958 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
/* Dual-port boards: the peer function may also need aux power. */
2962 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2963 struct net_device *dev_peer;
2965 dev_peer = pci_get_drvdata(tp->pdev_peer);
2967 /* remove_one() may have been run on the peer. */
2969 struct tg3 *tp_peer = netdev_priv(dev_peer);
2971 if (tg3_flag(tp_peer, INIT_COMPLETE))
2974 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2975 tg3_flag(tp_peer, ENABLE_ASF))
2980 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2981 tg3_flag(tp, ENABLE_ASF))
2985 tg3_pwrsrc_switch_to_vaux(tp);
2987 tg3_pwrsrc_die_with_vmain(tp);
/* Determine the 5700 LED/link polarity setting for the given speed;
 * the BCM5411 PHY and the LED_CTRL_MODE_PHY_2 mode invert the rule.
 * NOTE(review): the return statements themselves are elided in this
 * lossy sample; only the branch conditions are visible.
 */
2990 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2992 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2994 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2995 if (speed != SPEED_10)
2997 } else if (speed == SPEED_10)
/* Return true for ASIC/PHY combinations on which the PHY must NOT be
 * powered down (see the caller in tg3_power_down_phy).
 * NOTE(review): lossy sample - the case labels and return values of this
 * switch are elided.
 */
3003 static bool tg3_phy_power_bug(struct tg3 *tp)
3005 switch (tg3_asic_rev(tp)) {
3010 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3019 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
/* Return true for ASIC/PHY combinations where forcing the LED off at
 * power-down would misbehave (checked before writing FORCE_LED_OFF).
 * NOTE(review): lossy sample - case labels and returns are elided.
 */
3028 static bool tg3_phy_led_bug(struct tg3 *tp)
3030 switch (tg3_asic_rev(tp)) {
3033 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
/* Power down the PHY (or its serdes/FET equivalent) ahead of a low-power
 * transition, honoring the several chip-specific erratum paths, and
 * finally set BMCR_PDOWN unless tg3_phy_power_bug() forbids it.
 * NOTE(review): lossy sample - some branch bodies and closing braces are
 * elided between the visible lines.
 */
3042 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3046 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3049 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3050 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3051 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3052 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3055 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3056 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3057 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: park the internal ethernet PHY in IDDQ low-power mode. */
3062 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3064 val = tr32(GRC_MISC_CFG);
3065 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3068 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3070 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3073 tg3_writephy(tp, MII_ADVERTISE, 0);
3074 tg3_writephy(tp, MII_BMCR,
3075 BMCR_ANENABLE | BMCR_ANRESTART);
/* Enter the FET shadow register space to set standby power-down. */
3077 tg3_writephy(tp, MII_TG3_FET_TEST,
3078 phytest | MII_TG3_FET_SHADOW_EN);
3079 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3080 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3082 MII_TG3_FET_SHDW_AUXMODE4,
3085 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3088 } else if (do_low_power) {
3089 if (!tg3_phy_led_bug(tp))
3090 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3091 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3093 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3094 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3095 MII_TG3_AUXCTL_PCTL_VREG_11V;
3096 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3099 /* The PHY should not be powered down on some chips because
 * of bugs (see tg3_phy_power_bug).
 */
3102 if (tg3_phy_power_bug(tp))
/* 5784-AX/5761-AX: drop the 1000MB MAC clock to 12.5MHz first. */
3105 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3106 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3107 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3108 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3109 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3110 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3113 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3116 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration grant (SWARB). The lock is
 * recursive: nvram_lock_cnt counts nested acquisitions, and the hardware
 * request is issued only for the outermost one. Polls up to 8000 times
 * for the grant; on timeout the request is withdrawn.
 */
3117 static int tg3_nvram_lock(struct tg3 *tp)
3119 if (tg3_flag(tp, NVRAM)) {
3122 if (tp->nvram_lock_cnt == 0) {
3123 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3124 for (i = 0; i < 8000; i++) {
3125 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the arbitration request. */
3130 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3134 tp->nvram_lock_cnt++;
3139 /* tp->lock is held. */
/* Release one level of the recursive NVRAM arbitration lock; the
 * hardware grant is surrendered only when the count reaches zero.
 */
3140 static void tg3_nvram_unlock(struct tg3 *tp)
3142 if (tg3_flag(tp, NVRAM)) {
3143 if (tp->nvram_lock_cnt > 0)
3144 tp->nvram_lock_cnt--;
3145 if (tp->nvram_lock_cnt == 0)
3146 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 /* tp->lock is held. */
/* Enable host access to the NVRAM interface (5750+ only, and only when
 * the NVRAM is not firmware-protected).
 */
3151 static void tg3_enable_nvram_access(struct tg3 *tp)
3153 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154 u32 nvaccess = tr32(NVRAM_ACCESS);
3156 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3160 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear the host NVRAM access
 * enable bit on 5750+ non-protected parts.
 */
3161 static void tg3_disable_nvram_access(struct tg3 *tp)
3163 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3164 u32 nvaccess = tr32(NVRAM_ACCESS);
3166 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from a legacy SEEPROM part via the GRC EEPROM
 * engine: program the address, start the read, poll (up to 1000 times)
 * for COMPLETE, then fetch the data register. offset must be 32-bit
 * aligned and within EEPROM_ADDR_ADDR_MASK.
 */
3170 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3171 u32 offset, u32 *val)
3176 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3179 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3180 EEPROM_ADDR_DEVID_MASK |
3182 tw32(GRC_EEPROM_ADDR,
3184 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3185 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3186 EEPROM_ADDR_ADDR_MASK) |
3187 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3189 for (i = 0; i < 1000; i++) {
3190 tmp = tr32(GRC_EEPROM_ADDR);
3192 if (tmp & EEPROM_ADDR_COMPLETE)
3196 if (!(tmp & EEPROM_ADDR_COMPLETE))
3199 tmp = tr32(GRC_EEPROM_DATA);
3202 * The data will always be opposite the native endian
3203 * format. Perform a blind byteswap to compensate.
 */
3210 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll (10-40us per iteration, up to
 * NVRAM_CMD_TIMEOUT iterations) for the DONE bit; -EBUSY on timeout.
 */
3212 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3216 tw32(NVRAM_CMD, nvram_cmd);
3217 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3218 usleep_range(10, 40);
3219 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3225 if (i == NVRAM_CMD_TIMEOUT)
/* Translate a linear NVRAM offset to the physical address expected by
 * Atmel AT45DB0x1B-style buffered flash, whose pages are addressed as
 * (page << ATMEL_AT45DB0X1B_PAGE_POS) + offset-within-page. All other
 * NVRAM types use the address unchanged.
 */
3231 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3233 if (tg3_flag(tp, NVRAM) &&
3234 tg3_flag(tp, NVRAM_BUFFERED) &&
3235 tg3_flag(tp, FLASH) &&
3236 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3237 (tp->nvram_jedecnum == JEDEC_ATMEL))
3239 addr = ((addr / tp->nvram_pagesize) <<
3240 ATMEL_AT45DB0X1B_PAGE_POS) +
3241 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel AT45DB0x1B
 * page-encoded physical address back to a linear NVRAM offset. Identity
 * for all other NVRAM types.
 */
3246 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3248 if (tg3_flag(tp, NVRAM) &&
3249 tg3_flag(tp, NVRAM_BUFFERED) &&
3250 tg3_flag(tp, FLASH) &&
3251 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3252 (tp->nvram_jedecnum == JEDEC_ATMEL))
3254 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3255 tp->nvram_pagesize) +
3256 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3261 /* NOTE: Data read in from NVRAM is byteswapped according to
3262 * the byteswapping settings for all other register accesses.
3263 * tg3 devices are BE devices, so on a BE machine, the data
3264 * returned will be exactly as it is seen in NVRAM. On a LE
3265 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM: falls back to the SEEPROM path when
 * there is no NVRAM interface, otherwise takes the arbitration lock,
 * enables access, executes a read command, and tears down in reverse
 * order.
 */
3267 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3271 if (!tg3_flag(tp, NVRAM))
3272 return tg3_nvram_read_using_eeprom(tp, offset, val);
3274 offset = tg3_nvram_phys_addr(tp, offset);
3276 if (offset > NVRAM_ADDR_MSK)
3279 ret = tg3_nvram_lock(tp);
3283 tg3_enable_nvram_access(tp);
3285 tw32(NVRAM_ADDR, offset);
3286 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3287 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3290 *val = tr32(NVRAM_RDDATA);
3292 tg3_disable_nvram_access(tp);
3294 tg3_nvram_unlock(tp);
3299 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that returns the word as big-endian
 * (NVRAM native byte order) regardless of host endianness.
 */
3300 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3303 int res = tg3_nvram_read(tp, offset, &v);
3305 *val = cpu_to_be32(v);
/* Write a dword-aligned block to a legacy SEEPROM via the GRC EEPROM
 * engine, one 32-bit word at a time, polling for COMPLETE after each
 * word (up to 1000 iterations).
 * NOTE(review): lossy sample - addr computation and some error returns
 * are elided between visible lines.
 */
3309 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3310 u32 offset, u32 len, u8 *buf)
3315 for (i = 0; i < len; i += 4) {
3321 memcpy(&data, buf + i, 4);
3324 * The SEEPROM interface expects the data to always be opposite
3325 * the native endian format. We accomplish this by reversing
3326 * all the operations that would have been performed on the
3327 * data from a call to tg3_nvram_read_be32().
 */
3329 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
/* Clear the stale COMPLETE bit before starting this word's write. */
3331 val = tr32(GRC_EEPROM_ADDR);
3332 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3334 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3336 tw32(GRC_EEPROM_ADDR, val |
3337 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3338 (addr & EEPROM_ADDR_ADDR_MASK) |
3342 for (j = 0; j < 1000; j++) {
3343 val = tr32(GRC_EEPROM_ADDR);
3345 if (val & EEPROM_ADDR_COMPLETE)
3349 if (!(val & EEPROM_ADDR_COMPLETE)) {
3358 /* offset and length are dword aligned */
/* Write to unbuffered flash, which only supports page-granular erase:
 * for each affected page, read-modify-write the whole page through a
 * kmalloc'd bounce buffer (read page, merge caller data, WREN, erase,
 * WREN again, then program word-by-word with FIRST/LAST framing), and
 * finish with a write-disable (WRDI) command.
 * NOTE(review): lossy sample - the outer while-loop head, several error
 * checks and the cleanup/kfree path are elided.
 */
3359 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3363 u32 pagesize = tp->nvram_pagesize;
3364 u32 pagemask = pagesize - 1;
3368 tmp = kmalloc(pagesize, GFP_KERNEL);
3374 u32 phy_addr, page_off, size;
3376 phy_addr = offset & ~pagemask;
/* Read the entire current page into the bounce buffer. */
3378 for (j = 0; j < pagesize; j += 4) {
3379 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3380 (__be32 *) (tmp + j));
3387 page_off = offset & pagemask;
3394 memcpy(tmp + page_off, buf, size);
3396 offset = offset + (pagesize - page_off);
3398 tg3_enable_nvram_access(tp);
3401 * Before we can erase the flash page, we need
3402 * to issue a special "write enable" command.
 */
3404 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3406 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3409 /* Erase the target page */
3410 tw32(NVRAM_ADDR, phy_addr);
3412 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3413 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3415 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3418 /* Issue another write enable to start the write. */
3419 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3421 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Program the merged page back, one word at a time. */
3424 for (j = 0; j < pagesize; j += 4) {
3427 data = *((__be32 *) (tmp + j));
3429 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3431 tw32(NVRAM_ADDR, phy_addr + j);
3433 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3437 nvram_cmd |= NVRAM_CMD_FIRST;
3438 else if (j == (pagesize - 4))
3439 nvram_cmd |= NVRAM_CMD_LAST;
3441 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
/* Re-protect the flash when done. */
3449 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450 tg3_nvram_exec_cmd(tp, nvram_cmd);
3457 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM word-by-word. The controller handles
 * page buffering, so no erase is needed; FIRST/LAST command bits frame
 * each page, and ST-JEDEC parts on pre-5752/5755 chips need an explicit
 * WREN before each page's first word.
 */
3458 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3463 for (i = 0; i < len; i += 4, offset += 4) {
3464 u32 page_off, phy_addr, nvram_cmd;
3467 memcpy(&data, buf + i, 4);
3468 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3470 page_off = offset % tp->nvram_pagesize;
3472 phy_addr = tg3_nvram_phys_addr(tp, offset);
3474 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3476 if (page_off == 0 || i == 0)
3477 nvram_cmd |= NVRAM_CMD_FIRST;
3478 if (page_off == (tp->nvram_pagesize - 4))
3479 nvram_cmd |= NVRAM_CMD_LAST;
3482 nvram_cmd |= NVRAM_CMD_LAST;
/* Only reprogram NVRAM_ADDR when required by the part. */
3484 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3485 !tg3_flag(tp, FLASH) ||
3486 !tg3_flag(tp, 57765_PLUS))
3487 tw32(NVRAM_ADDR, phy_addr);
3489 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3490 !tg3_flag(tp, 5755_PLUS) &&
3491 (tp->nvram_jedecnum == JEDEC_ST) &&
3492 (nvram_cmd & NVRAM_CMD_FIRST)) {
3495 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3496 ret = tg3_nvram_exec_cmd(tp, cmd);
3500 if (!tg3_flag(tp, FLASH)) {
3501 /* We always do complete word writes to eeprom. */
3502 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3505 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3512 /* offset and length are dword aligned */
/* Top-level NVRAM block write: temporarily drop the GPIO1 write-protect
 * output if EEPROM_WRITE_PROT is set, pick the SEEPROM / buffered /
 * unbuffered path, and bracket the operation with lock + access-enable
 * + GRC write-enable, restoring everything afterwards.
 */
3513 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3517 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3518 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3519 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3523 if (!tg3_flag(tp, NVRAM)) {
3524 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3528 ret = tg3_nvram_lock(tp);
3532 tg3_enable_nvram_access(tp);
3533 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3534 tw32(NVRAM_WRITE1, 0x406);
3536 grc_mode = tr32(GRC_MODE);
3537 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3539 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3540 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3543 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3547 grc_mode = tr32(GRC_MODE);
3548 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3550 tg3_disable_nvram_access(tp);
3551 tg3_nvram_unlock(tp);
3554 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3555 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* On-chip scratch memory ranges for the RX and TX embedded CPUs. */
3562 #define RX_CPU_SCRATCH_BASE 0x30000
3563 #define RX_CPU_SCRATCH_SIZE 0x04000
3564 #define TX_CPU_SCRATCH_BASE 0x34000
3565 #define TX_CPU_SCRATCH_SIZE 0x04000
3567 /* tp->lock is held. */
/* Repeatedly request HALT of the given embedded CPU (up to 10000 tries)
 * until it reports halted; bail out early if the PCI channel went away.
 * Returns -EBUSY on timeout, 0 on success.
 */
3568 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3571 const int iters = 10000;
3573 for (i = 0; i < iters; i++) {
3574 tw32(cpu_base + CPU_STATE, 0xffffffff);
3575 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3576 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3578 if (pci_channel_offline(tp->pdev))
3582 return (i == iters) ? -EBUSY : 0;
3585 /* tp->lock is held. */
/* Pause the RX CPU, then assert one final state-clear + HALT with a
 * flushing write to make sure the halt sticks.
 */
3586 static int tg3_rxcpu_pause(struct tg3 *tp)
3588 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3590 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3597 /* tp->lock is held. */
/* Pause the TX embedded CPU (no extra re-assert needed, unlike RX). */
3598 static int tg3_txcpu_pause(struct tg3 *tp)
3600 return tg3_pause_cpu(tp, TX_CPU_BASE);
3603 /* tp->lock is held. */
/* Clear the CPU state and release it from HALT (flushed write). */
3604 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3606 tw32(cpu_base + CPU_STATE, 0xffffffff);
3607 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3610 /* tp->lock is held. */
/* Convenience wrapper: resume the RX embedded CPU. */
3611 static void tg3_rxcpu_resume(struct tg3 *tp)
3613 tg3_resume_cpu(tp, RX_CPU_BASE);
3616 /* tp->lock is held. */
/* Halt the RX or TX embedded CPU. 5906 uses the VCPU extended control
 * register instead; 5705+ has no TX CPU at all (BUG_ON guards that).
 * Also clears any firmware NVRAM arbitration request afterwards.
 */
3617 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3621 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3623 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3624 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3626 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3629 if (cpu_base == RX_CPU_BASE) {
3630 rc = tg3_rxcpu_pause(tp);
3633 * There is only an Rx CPU for the 5750 derivative in the
 * SSB core case.
 */
3636 if (tg3_flag(tp, IS_SSB_CORE))
3639 rc = tg3_txcpu_pause(tp);
3643 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3644 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3648 /* Clear firmware's nvram arbitration. */
3649 if (tg3_flag(tp, NVRAM))
3650 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Compute the number of 32-bit data words in a firmware image or
 * fragment, per the format rules documented inline below.
 */
3654 static int tg3_fw_data_len(struct tg3 *tp,
3655 const struct tg3_firmware_hdr *fw_hdr)
3659 /* Non fragmented firmware have one firmware header followed by a
3660 * contiguous chunk of data to be written. The length field in that
3661 * header is not the length of data to be written but the complete
3662 * length of the bss. The data length is determined based on
3663 * tp->fw->size minus headers.
 *
3665 * Fragmented firmware have a main header followed by multiple
3666 * fragments. Each fragment is identical to non fragmented firmware
3667 * with a firmware header followed by a contiguous chunk of data. In
3668 * the main header, the length field is unused and set to 0xffffffff.
3669 * In each fragment header the length is the entire size of that
3670 * fragment i.e. fragment data + header length. Data length is
3671 * therefore length field in the header minus TG3_FW_HDR_LEN.
 */
3673 if (tp->fw_len == 0xffffffff)
3674 fw_len = be32_to_cpu(fw_hdr->len);
3676 fw_len = tp->fw->size;
3678 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3681 /* tp->lock is held. */
/* Load a firmware image (possibly fragmented) into an embedded CPU's
 * scratch memory: halt the CPU (taking the NVRAM lock first so bootcode
 * isn't interrupted mid-load), zero the scratch area, then copy each
 * fragment's words to its base address.
 * NOTE(review): lossy sample - some error returns and the per-fragment
 * loop header are elided between the visible lines.
 */
3682 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3683 u32 cpu_scratch_base, int cpu_scratch_size,
3684 const struct tg3_firmware_hdr *fw_hdr)
3687 void (*write_op)(struct tg3 *, u32, u32);
3688 int total_len = tp->fw->size;
3690 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3692 "%s: Trying to load TX cpu firmware which is 5705\n",
/* Choose direct vs. indirect register writes based on chip family. */
3697 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3698 write_op = tg3_write_mem;
3700 write_op = tg3_write_indirect_reg32;
3702 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3703 /* It is possible that bootcode is still loading at this point.
3704 * Get the nvram lock first before halting the cpu.
 */
3706 int lock_err = tg3_nvram_lock(tp);
3707 err = tg3_halt_cpu(tp, cpu_base);
3709 tg3_nvram_unlock(tp);
/* Zero the scratch area, then park the CPU in HALT for the load. */
3713 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3714 write_op(tp, cpu_scratch_base + i, 0);
3715 tw32(cpu_base + CPU_STATE, 0xffffffff);
3716 tw32(cpu_base + CPU_MODE,
3717 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3719 /* Subtract additional main header for fragmented firmware and
3720 * advance to the first fragment
 */
3722 total_len -= TG3_FW_HDR_LEN;
3727 u32 *fw_data = (u32 *)(fw_hdr + 1);
3728 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3729 write_op(tp, cpu_scratch_base +
3730 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3732 be32_to_cpu(fw_data[i]));
3734 total_len -= be32_to_cpu(fw_hdr->len);
3736 /* Advance to next fragment */
3737 fw_hdr = (struct tg3_firmware_hdr *)
3738 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3739 } while (total_len > 0);
3747 /* tp->lock is held. */
/* Set the embedded CPU's program counter, retrying up to 5 times (with
 * a HALT re-assert between attempts) until the PC reads back correctly.
 * Returns -EBUSY if the PC never sticks.
 */
3748 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3751 const int iters = 5;
3753 tw32(cpu_base + CPU_STATE, 0xffffffff);
3754 tw32_f(cpu_base + CPU_PC, pc);
3756 for (i = 0; i < iters; i++) {
3757 if (tr32(cpu_base + CPU_PC) == pc)
3759 tw32(cpu_base + CPU_STATE, 0xffffffff);
3760 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3761 tw32_f(cpu_base + CPU_PC, pc);
3765 return (i == iters) ? -EBUSY : 0;
3768 /* tp->lock is held. */
/* Load the 5701-A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU at the image's base address.
 */
3769 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3771 const struct tg3_firmware_hdr *fw_hdr;
3774 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3776 /* Firmware blob starts with version numbers, followed by
3777 start address and length. We are setting complete length.
3778 length = end_address_of_bss - start_address_of_text.
3779 Remainder is the blob to be loaded contiguously
3780 from start address. */
3782 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3783 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3788 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3789 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3794 /* Now startup only the RX cpu. */
3795 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3796 be32_to_cpu(fw_hdr->base_addr));
3798 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3799 "should be %08x\n", __func__,
3800 tr32(RX_CPU_BASE + CPU_PC),
3801 be32_to_cpu(fw_hdr->base_addr));
3805 tg3_rxcpu_resume(tp);
/* Verify the RX CPU bootcode has entered its service loop (polled via
 * the hardware breakpoint register) and that no other firmware patch is
 * already installed - preconditions for downloading the 57766 EEE patch.
 */
3810 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3812 const int iters = 1000;
3816 /* Wait for boot code to complete initialization and enter service
3817 * loop. It is then safe to download service patches
 */
3819 for (i = 0; i < iters; i++) {
3820 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3827 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3831 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3833 netdev_warn(tp->dev,
3834 "Other patches exist. Not downloading EEE patch\n");
3841 /* tp->lock is held. */
/* Download the 57766 service-patch firmware: validate the RX CPU state,
 * sanity-check the blob's base address, pause the RX CPU, load the
 * fragmented image, and resume.
 */
3842 static void tg3_load_57766_firmware(struct tg3 *tp)
3844 struct tg3_firmware_hdr *fw_hdr;
3846 if (!tg3_flag(tp, NO_NVRAM))
3849 if (tg3_validate_rxcpu_state(tp))
3855 /* This firmware blob has a different format than older firmware
3856 * releases as given below. The main difference is we have fragmented
3857 * data to be written to non-contiguous locations.
 *
3859 * In the beginning we have a firmware header identical to other
3860 * firmware which consists of version, base addr and length. The length
3861 * here is unused and set to 0xffffffff.
 *
3863 * This is followed by a series of firmware fragments which are
3864 * individually identical to previous firmware. i.e. they have the
3865 * firmware header and followed by data for that fragment. The version
3866 * field of the individual fragment header is unused.
 */
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3873 if (tg3_rxcpu_pause(tp))
3876 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3877 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3879 tg3_rxcpu_resume(tp);
3882 /* tp->lock is held. */
/* Load the TSO offload firmware. On 5705 it goes into the RX CPU's
 * mbuf-pool SRAM; on other FW_TSO parts it goes into the TX CPU
 * scratch area. Start the target CPU at the image base on success.
 */
3883 static int tg3_load_tso_firmware(struct tg3 *tp)
3885 const struct tg3_firmware_hdr *fw_hdr;
3886 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3889 if (!tg3_flag(tp, FW_TSO))
3892 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3894 /* Firmware blob starts with version numbers, followed by
3895 start address and length. We are setting complete length.
3896 length = end_address_of_bss - start_address_of_text.
3897 Remainder is the blob to be loaded contiguously
3898 from start address. */
3900 cpu_scratch_size = tp->fw_len;
3902 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3903 cpu_base = RX_CPU_BASE;
3904 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3906 cpu_base = TX_CPU_BASE;
3907 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3908 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3911 err = tg3_load_firmware_cpu(tp, cpu_base,
3912 cpu_scratch_base, cpu_scratch_size,
3917 /* Now startup the cpu. */
3918 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3919 be32_to_cpu(fw_hdr->base_addr));
3922 "%s fails to set CPU PC, is %08x should be %08x\n",
3923 __func__, tr32(cpu_base + CPU_PC),
3924 be32_to_cpu(fw_hdr->base_addr));
3928 tg3_resume_cpu(tp, cpu_base);
3932 /* tp->lock is held. */
/* Program one MAC address slot: pack the 6 bytes into the high (2-byte)
 * and low (4-byte) halves and write them to the MAC_ADDR or MAC_EXTADDR
 * register pair selected by index.
 * NOTE(review): lossy sample - the condition choosing the MAC_ADDR vs
 * MAC_EXTADDR bank (presumably on index) is elided.
 */
3933 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3936 u32 addr_high, addr_low;
3938 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3939 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3940 (mac_addr[4] << 8) | mac_addr[5]);
3943 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3944 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3947 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3948 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3952 /* tp->lock is held. */
/* Program the device MAC address into all address slots (0-3, optionally
 * skipping slot 1), plus slots 4-15 on 5703/5704, and seed the TX
 * backoff register from the address byte sum.
 */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3958 for (i = 0; i < 4; i++) {
3959 if (i == 1 && skip_mac_1)
3961 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3964 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3965 tg3_asic_rev(tp) == ASIC_REV_5704) {
3966 for (i = 4; i < 16; i++)
3967 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3970 addr_high = (tp->dev->dev_addr[0] +
3971 tp->dev->dev_addr[1] +
3972 tp->dev->dev_addr[2] +
3973 tp->dev->dev_addr[3] +
3974 tp->dev->dev_addr[4] +
3975 tp->dev->dev_addr[5]) &
3976 TX_BACKOFF_SEED_MASK;
3977 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Restore MISC_HOST_CTRL so register accesses (indirect or otherwise)
 * work after a power-state change may have reset PCI config space.
 */
3980 static void tg3_enable_register_access(struct tg3 *tp)
3983 * Make sure register accesses (indirect or otherwise) will function
 * correctly.
 */
3986 pci_write_config_dword(tp->pdev,
3987 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device to full power: re-enable register access, move the
 * PCI device to D0, and switch a NIC board from Vaux back to Vmain.
 * Logs an error if the D0 transition fails.
 */
3990 static int tg3_power_up(struct tg3 *tp)
3994 tg3_enable_register_access(tp);
3996 err = pci_set_power_state(tp->pdev, PCI_D0);
3998 /* Switch out of Vaux if it is a NIC */
3999 tg3_pwrsrc_switch_to_vmain(tp);
4001 netdev_err(tp->dev, "Transition to D0 failed\n");
4007 static int tg3_setup_phy(struct tg3 *, bool);
/* Prepare the chip for a low-power transition (suspend / power-down):
 *  1. restore CLKREQ, mask PCI interrupts;
 *  2. decide whether the device should stay armed for wake-on-LAN;
 *  3. drop the PHY into its low-power/WOL advertisement state, either
 *     via phylib (USE_PHYLIB) or via the legacy MII path;
 *  4. program MAC_MODE / RX_MODE for magic-packet reception when waking
 *     is required;
 *  5. gate core clocks as far as the ASIC generation permits;
 *  6. apply the 5750 A/B PLL workaround and tell firmware we are
 *     shutting down.
 * NOTE(review): this excerpt is heavily elided (missing locals, else
 * arms, braces, udelay()s) — treat the visible lines as an outline only.
 */
4009 static int tg3_power_down_prepare(struct tg3 *tp)
4012 bool device_should_wake, do_low_power;
4014 tg3_enable_register_access(tp);
4016 /* Restore the CLKREQ setting. */
4017 if (tg3_flag(tp, CLKREQ_BUG))
4018 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4019 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Mask PCI interrupts while the device is being quiesced. */
4021 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4022 tw32(TG3PCI_MISC_HOST_CTRL,
4023 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4025 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4026 tg3_flag(tp, WOL_ENABLE);
/* --- PHY low-power configuration: phylib path --- */
4028 if (tg3_flag(tp, USE_PHYLIB)) {
4029 do_low_power = false;
4030 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4031 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4032 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4033 struct phy_device *phydev;
4036 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4038 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
/* Save the current link settings so they can be restored on resume. */
4040 tp->link_config.speed = phydev->speed;
4041 tp->link_config.duplex = phydev->duplex;
4042 tp->link_config.autoneg = phydev->autoneg;
4043 ethtool_convert_link_mode_to_legacy_u32(
4044 &tp->link_config.advertising,
4045 phydev->advertising);
/* Build a minimal advertisement (10HD baseline, plus 100M / 10FD
 * when WOL or ASF needs the link alive at higher speed).
 */
4047 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4048 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4050 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4052 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4055 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4056 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4057 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4059 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4069 linkmode_copy(phydev->advertising, advertising);
4070 phy_start_aneg(phydev);
/* Certain Broadcom PHY families need the full low-power sequence. */
4072 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 if (phyid != PHY_ID_BCMAC131) {
4074 phyid &= PHY_BCM_OUI_MASK;
4075 if (phyid == PHY_BCM_OUI_1 ||
4076 phyid == PHY_BCM_OUI_2 ||
4077 phyid == PHY_BCM_OUI_3)
4078 do_low_power = true;
/* --- Legacy (non-phylib) path --- */
4082 do_low_power = true;
4084 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4087 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 tg3_setup_phy(tp, false);
/* 5906: WOL is handled by the on-chip VCPU; just disable its WOL bit. */
4091 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4094 val = tr32(GRC_VCPU_EXT_CTRL);
4095 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 } else if (!tg3_flag(tp, ENABLE_ASF)) {
/* Poll the firmware mailbox until boot code signals readiness. */
4100 for (i = 0; i < 200; i++) {
4101 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4107 if (tg3_flag(tp, WOL_CAP))
4108 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 WOL_DRV_STATE_SHUTDOWN |
/* --- Arm the MAC for wake-up frames --- */
4113 if (device_should_wake) {
4116 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4118 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 tg3_phy_auxctl_write(tp,
4120 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
/* Pick the MAC port mode matching the PHY type / saved speed. */
4127 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 else if (tp->phy_flags &
4130 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 if (tp->link_config.active_speed == SPEED_1000)
4132 mac_mode = MAC_MODE_PORT_MODE_GMII;
4134 mac_mode = MAC_MODE_PORT_MODE_MII;
4136 mac_mode = MAC_MODE_PORT_MODE_MII;
4138 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 SPEED_100 : SPEED_10;
4142 if (tg3_5700_link_polarity(tp, speed))
4143 mac_mode |= MAC_MODE_LINK_POLARITY;
4145 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4148 mac_mode = MAC_MODE_PORT_MODE_TBI;
4151 if (!tg3_flag(tp, 5750_PLUS))
4152 tw32(MAC_LED_CTRL, tp->led_ctrl);
4154 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4159 if (tg3_flag(tp, ENABLE_APE))
4160 mac_mode |= MAC_MODE_APE_TX_EN |
4161 MAC_MODE_APE_RX_EN |
4162 MAC_MODE_TDE_ENABLE;
4164 tw32_f(MAC_MODE, mac_mode);
4167 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* --- Clock gating, graduated by ASIC generation --- */
4171 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4176 base_val = tp->pci_clock_ctrl;
4177 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 CLOCK_CTRL_TXCLK_DISABLE);
4180 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 } else if (tg3_flag(tp, 5780_CLASS) ||
4183 tg3_flag(tp, CPMU_PRESENT) ||
4184 tg3_asic_rev(tp) == ASIC_REV_5906) {
/* These parts manage clocks themselves — nothing to do here. */
4186 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 u32 newbits1, newbits2;
4189 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE |
4194 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 } else if (tg3_flag(tp, 5705_PLUS)) {
4196 newbits1 = CLOCK_CTRL_625_CORE;
4197 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4199 newbits1 = CLOCK_CTRL_ALTCLK;
4200 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
/* Two-step write with settle delays (tw32_wait_f). */
4203 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4206 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4209 if (!tg3_flag(tp, 5705_PLUS)) {
4212 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 CLOCK_CTRL_TXCLK_DISABLE |
4216 CLOCK_CTRL_44MHZ_CORE);
4218 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4221 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 tp->pci_clock_ctrl | newbits3, 40);
/* Fully power down the PHY only when nothing needs the link alive. */
4226 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 tg3_power_down_phy(tp, do_low_power);
4229 tg3_frob_aux_power(tp, true);
4231 /* Workaround for unstable PLL clock */
4232 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 u32 val = tr32(0x7d00);
4237 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4239 if (!tg3_flag(tp, ENABLE_ASF)) {
/* Halting the RX CPU requires the NVRAM lock around it. */
4242 err = tg3_nvram_lock(tp);
4243 tg3_halt_cpu(tp, RX_CPU_BASE);
4245 tg3_nvram_unlock(tp);
/* Tell firmware (and APE, if present) we are going down. */
4249 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4251 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
/* Final power-down: arm PCI wake from D3 when WOL is enabled, then put
 * the device into D3hot.  Callers are expected to have run
 * tg3_power_down_prepare() first.
 */
4256 static void tg3_power_down(struct tg3 *tp)
4258 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY AUX status register into (*speed, *duplex).
 * The switch covers the six standard speed/duplex encodings; the default
 * arm (partially elided here) handles FET-style PHYs via the 100/FULL
 * status bits and otherwise reports SPEED_UNKNOWN / DUPLEX_UNKNOWN.
 * NOTE(review): excerpt is missing several *speed assignments, break
 * statements and the default: label — see the full source.
 */
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4264 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 case MII_TG3_AUX_STAT_10HALF:
4267 *duplex = DUPLEX_HALF;
4270 case MII_TG3_AUX_STAT_10FULL:
4272 *duplex = DUPLEX_FULL;
4275 case MII_TG3_AUX_STAT_100HALF:
4277 *duplex = DUPLEX_HALF;
4280 case MII_TG3_AUX_STAT_100FULL:
4282 *duplex = DUPLEX_FULL;
4285 case MII_TG3_AUX_STAT_1000HALF:
4286 *speed = SPEED_1000;
4287 *duplex = DUPLEX_HALF;
4290 case MII_TG3_AUX_STAT_1000FULL:
4291 *speed = SPEED_1000;
4292 *duplex = DUPLEX_FULL;
/* FET PHYs encode speed/duplex differently: dedicated 100 / FULL bits. */
4296 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4299 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4303 *speed = SPEED_UNKNOWN;
4304 *duplex = DUPLEX_UNKNOWN;
/* Write the autonegotiation advertisement into the PHY:
 *  - MII_ADVERTISE from the ethtool advertise mask plus flow control;
 *  - MII_CTRL1000 for gigabit-capable PHYs (with the 5701 A0/B0
 *    force-master erratum workaround);
 *  - the EEE advertisement registers (clause 45) when the PHY is
 *    EEE-capable, plus per-ASIC DSP fixups.
 * Returns 0 or the first tg3_writephy/auxctl error.
 * NOTE(review): excerpt elides early returns and some locals (val, err2).
 */
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4314 new_adv = ADVERTISE_CSMA;
4315 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 new_adv |= mii_advertise_flowctrl(flowctrl);
4318 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4322 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
/* 5701 A0/B0 erratum: force master mode for 1000T. */
4325 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4329 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
/* Everything below is EEE-only. */
4334 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4337 tw32(TG3_CPMU_EEE_MODE,
4338 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4340 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4345 /* Advertise 100-BaseTX EEE ability */
4346 if (advertise & ADVERTISED_100baseT_Full)
4347 val |= MDIO_AN_EEE_ADV_100TX;
4348 /* Advertise 1000-BaseT EEE ability */
4349 if (advertise & ADVERTISED_1000baseT_Full)
4350 val |= MDIO_AN_EEE_ADV_1000T;
/* Track what we actually advertised in tp->eee. */
4352 if (!tp->eee.eee_enabled) {
4354 tp->eee.advertised = 0;
4356 tp->eee.advertised = advertise &
4357 (ADVERTISED_100baseT_Full |
4358 ADVERTISED_1000baseT_Full);
4361 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
/* Per-ASIC DSP tweaks needed when EEE is advertised. */
4365 switch (tg3_asic_rev(tp)) {
4367 case ASIC_REV_57765:
4368 case ASIC_REV_57766:
4370 /* If we advertised any eee advertisements above... */
4372 val = MII_TG3_DSP_TAP26_ALNOKO |
4373 MII_TG3_DSP_TAP26_RMRXSTO |
4374 MII_TG3_DSP_TAP26_OPCSINPT;
4375 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 MII_TG3_DSP_CH34TP2_HIBW01);
/* Always undo the SMDSP toggle, preserving the first error. */
4384 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Start the copper PHY toward link-up.
 * Autoneg path: compute the advertisement (a reduced low-power/WOL set
 * when in low-power mode, otherwise the configured one), program it via
 * tg3_phy_autoneg_cfg(), then restart autonegotiation — unless Link Flap
 * Avoidance (KEEP_LINK_ON_PWRDN in low-power) says to leave it alone.
 * Forced path: write BMCR directly for the configured speed/duplex and
 * spin until the old link (if any) drops.
 * NOTE(review): excerpt elides locals, break statements and braces.
 */
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4395 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
/* Low-power (suspend/WOL) advertisement: minimal speeds only. */
4399 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 adv = ADVERTISED_10baseT_Half |
4402 ADVERTISED_10baseT_Full;
4403 if (tg3_flag(tp, WOL_SPEED_100MB))
4404 adv |= ADVERTISED_100baseT_Half |
4405 ADVERTISED_100baseT_Full;
4406 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4407 if (!(tp->phy_flags &
4408 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4409 adv |= ADVERTISED_1000baseT_Half;
4410 adv |= ADVERTISED_1000baseT_Full;
4413 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Normal advertisement from link_config, gig stripped on 10/100 PHYs. */
4415 adv = tp->link_config.advertising;
4416 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4417 adv &= ~(ADVERTISED_1000baseT_Half |
4418 ADVERTISED_1000baseT_Full);
4420 fc = tp->link_config.flowctrl;
4423 tg3_phy_autoneg_cfg(tp, adv, fc);
4425 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4426 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4427 /* Normally during power down we want to autonegotiate
4428 * the lowest possible speed for WOL. However, to avoid
4429 * link flap, we leave it untouched.
4434 tg3_writephy(tp, MII_BMCR,
4435 BMCR_ANENABLE | BMCR_ANRESTART);
/* --- Forced speed/duplex path (autoneg disabled) --- */
4438 u32 bmcr, orig_bmcr;
4440 tp->link_config.active_speed = tp->link_config.speed;
4441 tp->link_config.active_duplex = tp->link_config.duplex;
4443 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4444 /* With autoneg disabled, 5715 only links up when the
4445 * advertisement register has the configured speed
4448 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 switch (tp->link_config.speed) {
4458 bmcr |= BMCR_SPEED100;
4462 bmcr |= BMCR_SPEED1000;
4466 if (tp->link_config.duplex == DUPLEX_FULL)
4467 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR when it changes; bounce through loopback and
 * wait for the old link to drop before applying the new mode.
 */
4469 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4470 (bmcr != orig_bmcr)) {
4471 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4472 for (i = 0; i < 1500; i++) {
/* Double BMSR read: link status is latched-low per MII spec. */
4476 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4477 tg3_readphy(tp, MII_BMSR, &tmp))
4479 if (!(tmp & BMSR_LSTATUS)) {
4484 tg3_writephy(tp, MII_BMCR, bmcr);
/* Reverse-engineer tp->link_config from the PHY's current register
 * state (used when firmware/bootloader already configured the PHY and
 * we must not disturb the link).  Fills autoneg, speed, duplex,
 * advertising and flowctrl from BMCR / ADVERTISE / CTRL1000.
 * Returns 0 on success, a tg3_readphy error otherwise.
 * NOTE(review): excerpt elides the "done:" label, early returns and
 * some braces — control flow here is approximate.
 */
4490 static int tg3_phy_pull_config(struct tg3 *tp)
4495 err = tg3_readphy(tp, MII_BMCR, &val);
/* --- Forced-mode path: decode speed/duplex straight from BMCR --- */
4499 if (!(val & BMCR_ANENABLE)) {
4500 tp->link_config.autoneg = AUTONEG_DISABLE;
4501 tp->link_config.advertising = 0;
4502 tg3_flag_clear(tp, PAUSE_AUTONEG);
4506 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4508 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511 tp->link_config.speed = SPEED_10;
4514 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 tp->link_config.speed = SPEED_100;
4519 case BMCR_SPEED1000:
4520 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4521 tp->link_config.speed = SPEED_1000;
4529 if (val & BMCR_FULLDPLX)
4530 tp->link_config.duplex = DUPLEX_FULL;
4532 tp->link_config.duplex = DUPLEX_HALF;
/* Forced mode cannot negotiate pause; assume both directions. */
4534 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
/* --- Autoneg path: rebuild the advertising mask --- */
4540 tp->link_config.autoneg = AUTONEG_ENABLE;
4541 tp->link_config.advertising = ADVERTISED_Autoneg;
4542 tg3_flag_set(tp, PAUSE_AUTONEG);
4544 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4552 tp->link_config.advertising |= adv | ADVERTISED_TP;
4554 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4556 tp->link_config.advertising |= ADVERTISED_FIBRE;
4559 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
/* Copper: gig bits live in CTRL1000; serdes: in ADVERTISE (1000X). */
4562 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4569 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 adv = tg3_decode_flowctrl_1000X(val);
4574 tp->link_config.flowctrl = adv;
4576 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4577 adv = mii_adv_to_ethtool_adv_x(val);
4580 tp->link_config.advertising |= adv;
/* BCM5401 PHY DSP initialization: disable tap power management, set the
 * extended-packet-length bit, and load a fixed sequence of DSP
 * coefficients (magic register/value pairs from Broadcom).  Errors from
 * the individual writes are OR-folded into err.
 */
4587 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 /* Turn off tap power management. */
4592 /* Set Extended packet length bit */
4593 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4595 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4596 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4597 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4598 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4599 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Compare the EEE configuration currently programmed in the hardware
 * (pulled via tg3_eee_pull_config) against the desired tp->eee settings.
 * Used to decide whether a PHY reset is needed for EEE changes to take
 * effect.  Non-EEE-capable PHYs trivially pass.
 * NOTE(review): the return statements are elided in this excerpt.
 */
4606 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4608 struct ethtool_eee eee;
4610 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613 tg3_eee_pull_config(tp, &eee);
4615 if (tp->eee.eee_enabled) {
/* Any mismatch in advertisement or LPI timing means "not ok". */
4616 if (tp->eee.advertised != eee.advertised ||
4617 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4618 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621 /* EEE is disabled but we're advertising */
/* Verify that the PHY's programmed advertisement matches what
 * tp->link_config asked for: MII_ADVERTISE (masked, with flow-control
 * bits when full duplex) and, for gig-capable PHYs, MII_CTRL1000 —
 * including the 5701 A0/B0 force-master erratum bits.  On success
 * *lcladv holds the raw MII_ADVERTISE value for the caller.
 * NOTE(review): return statements are elided in this excerpt.
 */
4629 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4631 u32 advmsk, tgtadv, advertising;
4633 advertising = tp->link_config.advertising;
4634 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4636 advmsk = ADVERTISE_ALL;
4637 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4638 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4639 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645 if ((*lcladv & advmsk) != tgtadv)
4648 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4653 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
/* 5701 A0/B0: master-mode bits are part of the expected value. */
4657 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4658 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4659 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4660 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4661 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4663 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666 if (tg3_ctrl != tgtadv)
/* Read the link partner's advertisement: MII_STAT1000 (gig abilities,
 * skipped on 10/100-only PHYs) plus MII_LPA, converted to an ethtool
 * link-mode mask and stored in tp->link_config.rmt_adv.  *rmtadv gets
 * the raw MII_LPA value for flow-control resolution by the caller.
 */
4673 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680 if (tg3_readphy(tp, MII_STAT1000, &val))
4683 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686 if (tg3_readphy(tp, MII_LPA, rmtadv))
4689 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4690 tp->link_config.rmt_adv = lpeth;
/* If the link state changed, update the carrier (netif_carrier_on/off),
 * clear the parallel-detect flag for MII serdes on link loss, and log
 * the change via tg3_link_report().
 * NOTE(review): the excerpt elides tg3_carrier_on/off bookkeeping lines
 * and the return — confirm the return semantics in the full source.
 */
4695 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4697 if (curr_link_up != tp->link_up) {
4699 netif_carrier_on(tp->dev);
4701 netif_carrier_off(tp->dev);
4702 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4703 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706 tg3_link_report(tp);
/* Acknowledge (write-1-to-clear) the latched MAC status change bits:
 * sync, config, MI-completion and link-state change.
 * NOTE(review): the tw32(MAC_STATUS, ...) line itself is elided in this
 * excerpt; only the bit list is visible.
 */
4713 static void tg3_clear_mac_status(struct tg3 *tp)
4718 MAC_STATUS_SYNC_CHANGED |
4719 MAC_STATUS_CFG_CHANGED |
4720 MAC_STATUS_MI_COMPLETION |
4721 MAC_STATUS_LNKSTATE_CHANGED);
/* Program the CPMU Energy-Efficient-Ethernet registers from tp->eee:
 * link-idle detection sources, LPI exit timing, the EEE mode word
 * (gated entirely by tp->eee.eee_enabled), and the two debounce timers.
 */
4725 static void tg3_setup_eee(struct tg3 *tp)
4729 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4730 TG3_CPMU_EEE_LNKIDL_UART_IDL;
/* 57765 A0 additionally needs APE TX detection for link-idle. */
4731 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4732 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4734 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4736 tw32_f(TG3_CPMU_EEE_CTRL,
4737 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
/* Assemble the EEE mode word; TX-side LPI obeys tx_lpi_enabled. */
4739 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4740 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4741 TG3_CPMU_EEEMD_LPI_IN_RX |
4742 TG3_CPMU_EEEMD_EEE_ENABLE;
4744 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4745 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4747 if (tg3_flag(tp, ENABLE_APE))
4748 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
/* Write 0 (EEE fully off) when EEE is disabled. */
4750 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4752 tw32_f(TG3_CPMU_EEE_DBTMR1,
4753 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4754 (tp->eee.tx_lpi_timer & 0xffff));
4756 tw32_f(TG3_CPMU_EEE_DBTMR2,
4757 TG3_CPMU_DBTMR2_APE_TX_2047US |
4758 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
/* Full copper-PHY link setup / status resolution.  Sequence:
 *  - quiesce MI auto-polling and clear latched MAC status;
 *  - apply per-chip PHY workarounds (reset-on-link-loss chips, 5401 DSP
 *    init, 5701 A0/B0 CRC workaround), clear PHY interrupts, set the
 *    interrupt mask and LED mode;
 *  - poll BMSR for link, decode speed/duplex from AUX status, then
 *    validate the negotiated (or forced) configuration;
 *  - program MAC_MODE, LED control, duplex and polarity to match, and
 *    finally report any link change.
 * Returns 0 (error paths elided in this excerpt).
 * NOTE(review): this excerpt is heavily elided (locals, else arms,
 * udelay()s, relink/goto labels) — treat the visible lines as an
 * outline, not complete control flow.
 */
4761 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4763 bool current_link_up;
4765 u32 lcl_adv, rmt_adv;
4770 tg3_clear_mac_status(tp);
/* Turn off MI auto-polling while we drive the MDIO bus directly. */
4772 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4774 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4780 /* Some third-party PHYs need to be reset on link going
4783 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4784 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4785 tg3_asic_rev(tp) == ASIC_REV_5705) &&
/* Double BMSR read: link-status bit is latched-low per MII spec. */
4787 tg3_readphy(tp, MII_BMSR, &bmsr);
4788 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4789 !(bmsr & BMSR_LSTATUS))
/* --- BCM5401: (re)load DSP coefficients when link is down --- */
4795 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4796 tg3_readphy(tp, MII_BMSR, &bmsr);
4797 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4798 !tg3_flag(tp, INIT_COMPLETE))
4801 if (!(bmsr & BMSR_LSTATUS)) {
4802 err = tg3_init_5401phy_dsp(tp);
4806 tg3_readphy(tp, MII_BMSR, &bmsr);
4807 for (i = 0; i < 1000; i++) {
4809 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4810 (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 at gig speed may need a PHY reset + DSP reload. */
4816 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4817 TG3_PHY_REV_BCM5401_B0 &&
4818 !(bmsr & BMSR_LSTATUS) &&
4819 tp->link_config.active_speed == SPEED_1000) {
4820 err = tg3_phy_reset(tp);
4822 err = tg3_init_5401phy_dsp(tp);
4827 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4828 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4829 /* 5701 {A0,B0} CRC bug workaround */
4830 tg3_writephy(tp, 0x15, 0x0a75);
4831 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4833 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836 /* Clear pending interrupts... */
4837 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4841 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4842 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4843 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4845 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4846 tg3_asic_rev(tp) == ASIC_REV_5701) {
4847 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4848 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4849 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4851 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Reset working state before probing the link. */
4854 current_link_up = false;
4855 current_speed = SPEED_UNKNOWN;
4856 current_duplex = DUPLEX_UNKNOWN;
4857 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4858 tp->link_config.rmt_adv = 0;
4860 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4861 err = tg3_phy_auxctl_read(tp,
4862 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4864 if (!err && !(val & (1 << 10))) {
4865 tg3_phy_auxctl_write(tp,
4866 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll (up to ~100 iterations) for link. */
4873 for (i = 0; i < 100; i++) {
4874 tg3_readphy(tp, MII_BMSR, &bmsr);
4875 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4876 (bmsr & BMSR_LSTATUS))
4881 if (bmsr & BMSR_LSTATUS) {
/* Wait for AUX status to settle, then decode speed/duplex. */
4884 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4885 for (i = 0; i < 2000; i++) {
4887 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4892 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to read back sane (not mid-reset 0x7fff). */
4897 for (i = 0; i < 200; i++) {
4898 tg3_readphy(tp, MII_BMCR, &bmcr);
4899 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4901 if (bmcr && bmcr != 0x7fff)
4909 tp->link_config.active_speed = current_speed;
4910 tp->link_config.active_duplex = current_duplex;
/* --- Validate negotiated vs requested configuration --- */
4912 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4913 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4915 if ((bmcr & BMCR_ANENABLE) &&
4917 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4918 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4919 current_link_up = true;
4921 /* EEE settings changes take effect only after a phy
4922 * reset. If we have skipped a reset due to Link Flap
4923 * Avoidance being enabled, do it now.
4925 if (!eee_config_ok &&
4926 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4932 if (!(bmcr & BMCR_ANENABLE) &&
4933 tp->link_config.speed == current_speed &&
4934 tp->link_config.duplex == current_duplex) {
4935 current_link_up = true;
/* Record MDI-X status and resolve flow control on full duplex. */
4939 if (current_link_up &&
4940 tp->link_config.active_duplex == DUPLEX_FULL) {
4943 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4944 reg = MII_TG3_FET_GEN_STAT;
4945 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4947 reg = MII_TG3_EXT_STAT;
4948 bit = MII_TG3_EXT_STAT_MDIX;
4951 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4952 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4954 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* No link (or low-power): restart the PHY bring-up. */
4959 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4960 tg3_phy_copper_begin(tp);
4962 if (tg3_flag(tp, ROBOSWITCH)) {
4963 current_link_up = true;
4964 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4965 current_speed = SPEED_1000;
4966 current_duplex = DUPLEX_FULL;
4967 tp->link_config.active_speed = current_speed;
4968 tp->link_config.active_duplex = current_duplex;
4971 tg3_readphy(tp, MII_BMSR, &bmsr);
4972 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4973 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4974 current_link_up = true;
/* --- Program MAC to match the resolved link parameters --- */
4977 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4978 if (current_link_up) {
4979 if (tp->link_config.active_speed == SPEED_100 ||
4980 tp->link_config.active_speed == SPEED_10)
4981 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4983 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4985 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4987 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4989 /* In order for the 5750 core in BCM4785 chip to work properly
4990 * in RGMII mode, the Led Control Register must be set up.
4992 if (tg3_flag(tp, RGMII_MODE)) {
4993 u32 led_ctrl = tr32(MAC_LED_CTRL);
4994 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4996 if (tp->link_config.active_speed == SPEED_10)
4997 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4998 else if (tp->link_config.active_speed == SPEED_100)
4999 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 LED_CTRL_100MBPS_ON);
5001 else if (tp->link_config.active_speed == SPEED_1000)
5002 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 LED_CTRL_1000MBPS_ON);
5005 tw32(MAC_LED_CTRL, led_ctrl);
5009 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5013 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5014 if (current_link_up &&
5015 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5016 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5018 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021 /* ??? Without this setting Netgear GA302T PHY does not
5022 * ??? send/receive packets...
5024 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5025 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5026 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5027 tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 tw32_f(MAC_MODE, tp->mac_mode);
5034 tg3_phy_eee_adjust(tp, current_link_up);
5036 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5037 /* Polled via timer. */
5038 tw32_f(MAC_EVENT, 0);
5040 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 @ 1G on PCI-X: notify firmware via the mailbox. */
5044 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5046 tp->link_config.active_speed == SPEED_1000 &&
5047 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050 (MAC_STATUS_SYNC_CHANGED |
5051 MAC_STATUS_CFG_CHANGED));
5054 NIC_SRAM_FIRMWARE_MBOX,
5055 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058 /* Prevent send BD corruption. */
5059 if (tg3_flag(tp, CLKREQ_BUG)) {
5060 if (tp->link_config.active_speed == SPEED_100 ||
5061 tp->link_config.active_speed == SPEED_10)
5062 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5063 PCI_EXP_LNKCTL_CLKREQ_EN);
5065 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5066 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 tg3_test_and_report_link_chg(tp, current_link_up);
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine), modeled on IEEE 802.3 Clause 37:
 * current state, MR_* management flags, timing, the ability-match
 * detector, and the raw tx/rx config words with their ANEG_CFG_* bit
 * layout.  The ANEG_* return codes and settle time follow the struct.
 */
5074 struct tg3_fiber_aneginfo {
5076 #define ANEG_STATE_UNKNOWN 0
5077 #define ANEG_STATE_AN_ENABLE 1
5078 #define ANEG_STATE_RESTART_INIT 2
5079 #define ANEG_STATE_RESTART 3
5080 #define ANEG_STATE_DISABLE_LINK_OK 4
5081 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5082 #define ANEG_STATE_ABILITY_DETECT 6
5083 #define ANEG_STATE_ACK_DETECT_INIT 7
5084 #define ANEG_STATE_ACK_DETECT 8
5085 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5086 #define ANEG_STATE_COMPLETE_ACK 10
5087 #define ANEG_STATE_IDLE_DETECT_INIT 11
5088 #define ANEG_STATE_IDLE_DETECT 12
5089 #define ANEG_STATE_LINK_OK 13
5090 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5091 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* MR_* flags: local control/status bits plus decoded link-partner
 * (MR_LP_ADV_*) abilities.
 */
5094 #define MR_AN_ENABLE 0x00000001
5095 #define MR_RESTART_AN 0x00000002
5096 #define MR_AN_COMPLETE 0x00000004
5097 #define MR_PAGE_RX 0x00000008
5098 #define MR_NP_LOADED 0x00000010
5099 #define MR_TOGGLE_TX 0x00000020
5100 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5101 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5102 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5103 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5104 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5105 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5106 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5107 #define MR_TOGGLE_RX 0x00002000
5108 #define MR_NP_RX 0x00004000
5110 #define MR_LINK_OK 0x80000000
5112 unsigned long link_time, cur_time;
/* Ability-match detector: a config word must repeat to count. */
5114 u32 ability_match_cfg;
5115 int ability_match_count;
5117 char ability_match, idle_match, ack_match;
/* Raw config words exchanged on the wire (ANEG_CFG_* layout below). */
5119 u32 txconfig, rxconfig;
5120 #define ANEG_CFG_NP 0x00000080
5121 #define ANEG_CFG_ACK 0x00000040
5122 #define ANEG_CFG_RF2 0x00000020
5123 #define ANEG_CFG_RF1 0x00000010
5124 #define ANEG_CFG_PS2 0x00000001
5125 #define ANEG_CFG_PS1 0x00008000
5126 #define ANEG_CFG_HD 0x00004000
5127 #define ANEG_CFG_FD 0x00002000
5128 #define ANEG_CFG_INVAL 0x00001f06
/* State-machine return codes and the settle-time constant. */
5133 #define ANEG_TIMER_ENAB 2
5134 #define ANEG_FAILED -1
5136 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine
 * (802.3 Clause 37 style).  Samples MAC_RX_AUTO_NEG when a config word
 * was received, runs the ability/ack/idle match detectors, then
 * advances ap->state.  Returns ANEG_TIMER_ENAB to request another tick,
 * ANEG_FAILED on error states, or ANEG_DONE (elided here) on success.
 * NOTE(review): this excerpt elides assignments of ret/rx_cfg_reg in
 * several arms, break statements and closing braces.
 */
5138 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5139 struct tg3_fiber_aneginfo *ap)
5142 unsigned long delta;
/* Fresh start: zero the match detectors. */
5146 if (ap->state == ANEG_STATE_UNKNOWN) {
5150 ap->ability_match_cfg = 0;
5151 ap->ability_match_count = 0;
5152 ap->ability_match = 0;
/* Sample the received config word, if any, and update detectors. */
5158 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5159 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5161 if (rx_cfg_reg != ap->ability_match_cfg) {
5162 ap->ability_match_cfg = rx_cfg_reg;
5163 ap->ability_match = 0;
5164 ap->ability_match_count = 0;
/* Same word seen twice in a row => ability match. */
5166 if (++ap->ability_match_count > 1) {
5167 ap->ability_match = 1;
5168 ap->ability_match_cfg = rx_cfg_reg;
5171 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config received: reset the detectors. */
5179 ap->ability_match_cfg = 0;
5180 ap->ability_match_count = 0;
5181 ap->ability_match = 0;
5187 ap->rxconfig = rx_cfg_reg;
5190 switch (ap->state) {
5191 case ANEG_STATE_UNKNOWN:
5192 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5193 ap->state = ANEG_STATE_AN_ENABLE;
5196 case ANEG_STATE_AN_ENABLE:
5197 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5198 if (ap->flags & MR_AN_ENABLE) {
5201 ap->ability_match_cfg = 0;
5202 ap->ability_match_count = 0;
5203 ap->ability_match = 0;
5207 ap->state = ANEG_STATE_RESTART_INIT;
5209 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 case ANEG_STATE_RESTART_INIT:
5214 ap->link_time = ap->cur_time;
5215 ap->flags &= ~(MR_NP_LOADED);
/* Send an all-zero config word while restarting. */
5217 tw32(MAC_TX_AUTO_NEG, 0);
5218 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5219 tw32_f(MAC_MODE, tp->mac_mode);
5222 ret = ANEG_TIMER_ENAB;
5223 ap->state = ANEG_STATE_RESTART;
5226 case ANEG_STATE_RESTART:
5227 delta = ap->cur_time - ap->link_time;
5228 if (delta > ANEG_STATE_SETTLE_TIME)
5229 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5231 ret = ANEG_TIMER_ENAB;
5234 case ANEG_STATE_DISABLE_LINK_OK:
5238 case ANEG_STATE_ABILITY_DETECT_INIT:
/* Advertise full duplex plus the configured pause bits. */
5239 ap->flags &= ~(MR_TOGGLE_TX);
5240 ap->txconfig = ANEG_CFG_FD;
5241 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5242 if (flowctrl & ADVERTISE_1000XPAUSE)
5243 ap->txconfig |= ANEG_CFG_PS1;
5244 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5245 ap->txconfig |= ANEG_CFG_PS2;
5246 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5247 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5248 tw32_f(MAC_MODE, tp->mac_mode);
5251 ap->state = ANEG_STATE_ABILITY_DETECT;
5254 case ANEG_STATE_ABILITY_DETECT:
5255 if (ap->ability_match != 0 && ap->rxconfig != 0)
5256 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259 case ANEG_STATE_ACK_DETECT_INIT:
5260 ap->txconfig |= ANEG_CFG_ACK;
5261 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5262 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5263 tw32_f(MAC_MODE, tp->mac_mode);
5266 ap->state = ANEG_STATE_ACK_DETECT;
5269 case ANEG_STATE_ACK_DETECT:
5270 if (ap->ack_match != 0) {
/* Partner's word (sans ACK) must match what ability-detect saw. */
5271 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5272 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5273 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5275 ap->state = ANEG_STATE_AN_ENABLE;
5277 } else if (ap->ability_match != 0 &&
5278 ap->rxconfig == 0) {
5279 ap->state = ANEG_STATE_AN_ENABLE;
5283 case ANEG_STATE_COMPLETE_ACK_INIT:
/* Reserved bits set => protocol error. */
5284 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's abilities into MR_LP_ADV_* flags. */
5288 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5289 MR_LP_ADV_HALF_DUPLEX |
5290 MR_LP_ADV_SYM_PAUSE |
5291 MR_LP_ADV_ASYM_PAUSE |
5292 MR_LP_ADV_REMOTE_FAULT1 |
5293 MR_LP_ADV_REMOTE_FAULT2 |
5294 MR_LP_ADV_NEXT_PAGE |
5297 if (ap->rxconfig & ANEG_CFG_FD)
5298 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5299 if (ap->rxconfig & ANEG_CFG_HD)
5300 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5301 if (ap->rxconfig & ANEG_CFG_PS1)
5302 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5303 if (ap->rxconfig & ANEG_CFG_PS2)
5304 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5305 if (ap->rxconfig & ANEG_CFG_RF1)
5306 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5307 if (ap->rxconfig & ANEG_CFG_RF2)
5308 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5309 if (ap->rxconfig & ANEG_CFG_NP)
5310 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5312 ap->link_time = ap->cur_time;
5314 ap->flags ^= (MR_TOGGLE_TX);
5315 if (ap->rxconfig & 0x0008)
5316 ap->flags |= MR_TOGGLE_RX;
5317 if (ap->rxconfig & ANEG_CFG_NP)
5318 ap->flags |= MR_NP_RX;
5319 ap->flags |= MR_PAGE_RX;
5321 ap->state = ANEG_STATE_COMPLETE_ACK;
5322 ret = ANEG_TIMER_ENAB;
5325 case ANEG_STATE_COMPLETE_ACK:
5326 if (ap->ability_match != 0 &&
5327 ap->rxconfig == 0) {
5328 ap->state = ANEG_STATE_AN_ENABLE;
5331 delta = ap->cur_time - ap->link_time;
5332 if (delta > ANEG_STATE_SETTLE_TIME) {
5333 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5334 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5336 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5337 !(ap->flags & MR_NP_RX)) {
5338 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346 case ANEG_STATE_IDLE_DETECT_INIT:
5347 ap->link_time = ap->cur_time;
/* Stop sending config words; wait for idle. */
5348 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5349 tw32_f(MAC_MODE, tp->mac_mode);
5352 ap->state = ANEG_STATE_IDLE_DETECT;
5353 ret = ANEG_TIMER_ENAB;
5356 case ANEG_STATE_IDLE_DETECT:
5357 if (ap->ability_match != 0 &&
5358 ap->rxconfig == 0) {
5359 ap->state = ANEG_STATE_AN_ENABLE;
5362 delta = ap->cur_time - ap->link_time;
5363 if (delta > ANEG_STATE_SETTLE_TIME) {
5364 /* XXX another gem from the Broadcom driver :( */
5365 ap->state = ANEG_STATE_LINK_OK;
5369 case ANEG_STATE_LINK_OK:
5370 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5375 /* ??? unimplemented */
5378 case ANEG_STATE_NEXT_PAGE_WAIT:
5379 /* ??? unimplemented */
5393 struct tg3_fiber_aneginfo aninfo;
5394 int status = ANEG_FAILED;
5398 tw32_f(MAC_TX_AUTO_NEG, 0);
5400 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5401 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407 memset(&aninfo, 0, sizeof(aninfo));
5408 aninfo.flags |= MR_AN_ENABLE;
5409 aninfo.state = ANEG_STATE_UNKNOWN;
5410 aninfo.cur_time = 0;
5412 while (++tick < 195000) {
5413 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5414 if (status == ANEG_DONE || status == ANEG_FAILED)
5420 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5421 tw32_f(MAC_MODE, tp->mac_mode);
5424 *txflags = aninfo.txconfig;
5425 *rxflags = aninfo.flags;
5427 if (status == ANEG_DONE &&
5428 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5429 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SERDES PHY with its vendor-documented magic
 * register sequence: set PLL lock range, soft-reset, configure PMA
 * channel 1, enable auto-lock/comdet, pulse POR, and finally deselect
 * the channel registers.  Skipped when already initialized and synced.
 * NOTE(review): delay/udelay lines between writes are elided in this
 * excerpt; the register values are opaque vendor constants.
 */
5435 static void tg3_init_bcm8002(struct tg3 *tp)
5437 u32 mac_status = tr32(MAC_STATUS);
5440 /* Reset when initting first time or we have a link. */
5441 if (tg3_flag(tp, INIT_COMPLETE) &&
5442 !(mac_status & MAC_STATUS_PCS_SYNCED))
5445 /* Set PLL lock range. */
5446 tg3_writephy(tp, 0x16, 0x8007);
5449 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5451 /* Wait for reset to complete. */
5452 /* XXX schedule_timeout() ... */
5453 for (i = 0; i < 500; i++)
5456 /* Config mode; select PMA/Ch 1 regs. */
5457 tg3_writephy(tp, 0x10, 0x8411);
5459 /* Enable auto-lock and comdet, select txclk for tx. */
5460 tg3_writephy(tp, 0x11, 0x0a10);
5462 tg3_writephy(tp, 0x18, 0x00a0);
5463 tg3_writephy(tp, 0x16, 0x41ff);
5465 /* Assert and deassert POR. */
5466 tg3_writephy(tp, 0x13, 0x0400);
5468 tg3_writephy(tp, 0x13, 0x0000);
5470 tg3_writephy(tp, 0x11, 0x0a50);
5472 tg3_writephy(tp, 0x11, 0x0a10);
5474 /* Wait for signal to stabilize */
5475 /* XXX schedule_timeout() ... */
5476 for (i = 0; i < 15000; i++)
5479 /* Deselect the channel register so we can read the PHYID
5482 tg3_writephy(tp, 0x10, 0x8011);
/* Drive link setup using the chip's hardware SG_DIG autonegotiation block.
 * @mac_status is a snapshot of MAC_STATUS taken by the caller.  Returns
 * true when the link is up (either via completed autoneg or via parallel
 * detection when the partner sends no config code words).
 */
5485 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 bool current_link_up;
5489 u32 sg_dig_ctrl, sg_dig_status;
5490 u32 serdes_cfg, expected_sg_dig_ctrl;
5491 int workaround, port_a;
5496 current_link_up = false;
/* The MAC_SERDES_CFG workaround applies to all chips except 5704 A0/A1;
 * on dual-MAC parts, DUAL_MAC_CTRL_ID distinguishes port A from port B.
 */
5498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5499 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5501 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5505 /* preserve bits 20-23 for voltage regulator */
5506 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced-speed case: disable HW autoneg if it was on, and declare link
 * up purely on PCS sync.
 */
5511 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5512 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5514 u32 val = serdes_cfg;
5520 tw32_f(MAC_SERDES_CFG, val);
5523 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5525 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5526 tg3_setup_flow_control(tp, 0, 0);
5527 current_link_up = true;
5532 /* Want auto-negotiation. */
5533 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
/* Fold the requested pause capabilities into the expected control word. */
5535 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5536 if (flowctrl & ADVERTISE_1000XPAUSE)
5537 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5538 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5539 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* Control word differs from what we want: (re)start HW autoneg, unless a
 * parallel-detected link is still alive (PCS synced, no config words), in
 * which case just keep the link and run down serdes_counter.
 */
5541 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5542 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5543 tp->serdes_counter &&
5544 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5545 MAC_STATUS_RCVD_CFG)) ==
5546 MAC_STATUS_PCS_SYNCED)) {
5547 tp->serdes_counter--;
5548 current_link_up = true;
/* 0xc011000: serdes config value used while soft-resetting SG_DIG —
 * presumably carried over from the vendor driver; exact bit meaning is
 * not documented here.
 */
5553 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5554 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5556 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5558 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5560 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5561 MAC_STATUS_SIGNAL_DET)) {
5562 sg_dig_status = tr32(SG_DIG_STATUS);
5563 mac_status = tr32(MAC_STATUS);
/* Autoneg completed: translate SG_DIG pause bits into MII-style
 * local/remote advertisements and program flow control from them.
 */
5565 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5566 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5567 u32 local_adv = 0, remote_adv = 0;
5569 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5570 local_adv |= ADVERTISE_1000XPAUSE;
5571 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5572 local_adv |= ADVERTISE_1000XPSE_ASYM;
5574 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5575 remote_adv |= LPA_1000XPAUSE;
5576 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5577 remote_adv |= LPA_1000XPAUSE_ASYM;
5579 tp->link_config.rmt_adv =
5580 mii_adv_to_ethtool_adv_x(remote_adv);
5582 tg3_setup_flow_control(tp, local_adv, remote_adv);
5583 current_link_up = true;
5584 tp->serdes_counter = 0;
5585 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Autoneg not complete: count down, then fall back to parallel
 * detection (link up if PCS synced and no config words received).
 */
5586 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5587 if (tp->serdes_counter)
5588 tp->serdes_counter--;
5591 u32 val = serdes_cfg;
5598 tw32_f(MAC_SERDES_CFG, val);
5601 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 /* Link parallel detection - link is up */
5605 /* only if we have PCS_SYNC and not */
5606 /* receiving config code words */
5607 mac_status = tr32(MAC_STATUS);
5608 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5609 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5610 tg3_setup_flow_control(tp, 0, 0);
5611 current_link_up = true;
5613 TG3_PHYFLG_PARALLEL_DETECT;
5614 tp->serdes_counter =
5615 SERDES_PARALLEL_DET_TIMEOUT;
5617 goto restart_autoneg;
5621 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5622 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 return current_link_up;
/* Link setup for chips without the SG_DIG hardware autoneg block: run the
 * software autoneg state machine (fiber_autoneg) or, for forced mode,
 * simply declare 1000FD up when the PCS is synced.  Returns link-up state.
 */
5629 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5631 bool current_link_up = false;
/* No PCS sync means no usable signal at all — bail out early. */
5633 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5637 u32 txflags, rxflags;
5640 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5641 u32 local_adv = 0, remote_adv = 0;
/* Map the software-aneg pause bits to MII 1000BASE-X advertisement
 * bits so the common flow-control helper can be reused.
 */
5643 if (txflags & ANEG_CFG_PS1)
5644 local_adv |= ADVERTISE_1000XPAUSE;
5645 if (txflags & ANEG_CFG_PS2)
5646 local_adv |= ADVERTISE_1000XPSE_ASYM;
5648 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5649 remote_adv |= LPA_1000XPAUSE;
5650 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5651 remote_adv |= LPA_1000XPAUSE_ASYM;
5653 tp->link_config.rmt_adv =
5654 mii_adv_to_ethtool_adv_x(remote_adv);
5656 tg3_setup_flow_control(tp, local_adv, remote_adv);
5658 current_link_up = true;
/* Ack sync/config change latches until they stay clear (bounded retry). */
5660 for (i = 0; i < 30; i++) {
5663 (MAC_STATUS_SYNC_CHANGED |
5664 MAC_STATUS_CFG_CHANGED));
5666 if ((tr32(MAC_STATUS) &
5667 (MAC_STATUS_SYNC_CHANGED |
5668 MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but carrier looks good: fall back to parallel
 * detection — PCS synced and no config code words being received.
 */
5672 mac_status = tr32(MAC_STATUS);
5673 if (!current_link_up &&
5674 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5675 !(mac_status & MAC_STATUS_RCVD_CFG))
5676 current_link_up = true;
5678 tg3_setup_flow_control(tp, 0, 0);
5680 /* Forcing 1000FD link up. */
5681 current_link_up = true;
5683 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 tw32_f(MAC_MODE, tp->mac_mode);
5691 return current_link_up;
/* Top-level link setup for TBI (fiber) ports.  Puts the MAC in TBI mode,
 * initializes a BCM8002 PHY if present, then delegates to the hardware
 * (SG_DIG) or software autoneg path.  Afterwards it clears latched status
 * bits, programs the link LED, and reports any speed/duplex/pause change.
 */
5694 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 u32 orig_active_speed;
5698 u8 orig_active_duplex;
5700 bool current_link_up;
/* Remember pre-setup state so we only log when something changed. */
5703 orig_pause_cfg = tp->link_config.active_flowctrl;
5704 orig_active_speed = tp->link_config.active_speed;
5705 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path (software-aneg chips, already initialized): if the MAC shows
 * a clean, synced link with no pending config, just ack the change
 * latches and skip the full setup.
 */
5707 if (!tg3_flag(tp, HW_AUTONEG) &&
5709 tg3_flag(tp, INIT_COMPLETE)) {
5710 mac_status = tr32(MAC_STATUS);
5711 mac_status &= (MAC_STATUS_PCS_SYNCED |
5712 MAC_STATUS_SIGNAL_DET |
5713 MAC_STATUS_CFG_CHANGED |
5714 MAC_STATUS_RCVD_CFG);
5715 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5716 MAC_STATUS_SIGNAL_DET)) {
5717 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5718 MAC_STATUS_CFG_CHANGED));
5723 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Fiber is always full duplex; select the TBI port mode. */
5725 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5726 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5727 tw32_f(MAC_MODE, tp->mac_mode);
5730 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5731 tg3_init_bcm8002(tp);
5733 /* Enable link change event even when serdes polling. */
5734 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 tp->link_config.rmt_adv = 0;
5738 mac_status = tr32(MAC_STATUS);
5740 if (tg3_flag(tp, HW_AUTONEG))
5741 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5743 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Drop the stale link-change bit from the shared status block so the
 * interrupt handler does not re-process it.
 */
5745 tp->napi[0].hw_status->status =
5746 (SD_STATUS_UPDATED |
5747 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack latched sync/config/link-state change bits until they stay clear. */
5749 for (i = 0; i < 100; i++) {
5750 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5751 MAC_STATUS_CFG_CHANGED));
5753 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5754 MAC_STATUS_CFG_CHANGED |
5755 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 mac_status = tr32(MAC_STATUS);
5760 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5761 current_link_up = false;
5762 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5763 tp->serdes_counter == 0) {
5764 tw32_f(MAC_MODE, (tp->mac_mode |
5765 MAC_MODE_SEND_CONFIGS));
5767 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber link is always 1000/full when up; drive the LED accordingly. */
5771 if (current_link_up) {
5772 tp->link_config.active_speed = SPEED_1000;
5773 tp->link_config.active_duplex = DUPLEX_FULL;
5774 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5775 LED_CTRL_LNKLED_OVERRIDE |
5776 LED_CTRL_1000MBPS_ON));
5778 tp->link_config.active_speed = SPEED_UNKNOWN;
5779 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5780 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5781 LED_CTRL_LNKLED_OVERRIDE |
5782 LED_CTRL_TRAFFIC_OVERRIDE));
/* If carrier state did not flip, still report when pause/speed/duplex
 * changed under us.
 */
5785 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5786 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5787 if (orig_pause_cfg != now_pause_cfg ||
5788 orig_active_speed != tp->link_config.active_speed ||
5789 orig_active_duplex != tp->link_config.active_duplex)
5790 tg3_link_report(tp);
/* Link setup for SerDes ports managed through an MII register interface
 * (e.g. 5714/5780-class and 5719/5720 SGMII mode).  Handles the SGMII
 * fast path, (re)starting autoneg, forcing speed/duplex, and resolving
 * duplex/flow control from the advertisement registers.
 */
5796 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 u32 current_speed = SPEED_UNKNOWN;
5801 u8 current_duplex = DUPLEX_UNKNOWN;
5802 bool current_link_up = false;
5803 u32 local_adv, remote_adv, sgsr;
/* 5719/5720 in SGMII mode: read speed/duplex/link directly from the
 * SERDES_TG3_1000X_STATUS register and bypass the MII autoneg logic.
 */
5805 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5806 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5807 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5808 (sgsr & SERDES_TG3_SGMII_MODE)) {
5813 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5815 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5816 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5818 current_link_up = true;
5819 if (sgsr & SERDES_TG3_SPEED_1000) {
5820 current_speed = SPEED_1000;
5821 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5822 } else if (sgsr & SERDES_TG3_SPEED_100) {
5823 current_speed = SPEED_100;
5824 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5826 current_speed = SPEED_10;
5827 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5831 current_duplex = DUPLEX_FULL;
5833 current_duplex = DUPLEX_HALF;
5836 tw32_f(MAC_MODE, tp->mac_mode);
5839 tg3_clear_mac_status(tp);
5841 goto fiber_setup_done;
5844 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5845 tw32_f(MAC_MODE, tp->mac_mode);
5848 tg3_clear_mac_status(tp);
5853 tp->link_config.rmt_adv = 0;
/* BMSR_LSTATUS is latched-low; read twice to get the current state. */
5855 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* On 5714 the MII link bit is unreliable — trust MAC_TX_STATUS instead. */
5857 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5858 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5859 bmsr |= BMSR_LSTATUS;
5861 bmsr &= ~BMSR_LSTATUS;
5864 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5866 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5867 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5868 /* do nothing, just check for link up at the end */
5869 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the wanted 1000BASE-X advertisement; only restart autoneg if
 * it differs from what is currently programmed or aneg is disabled.
 */
5872 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5873 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5874 ADVERTISE_1000XPAUSE |
5875 ADVERTISE_1000XPSE_ASYM |
5878 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5879 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5881 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5882 tg3_writephy(tp, MII_ADVERTISE, newadv);
5883 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5884 tg3_writephy(tp, MII_BMCR, bmcr);
5886 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5887 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5888 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced mode: compute the BMCR we want (aneg off, duplex forced). */
5895 bmcr &= ~BMCR_SPEED1000;
5896 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5898 if (tp->link_config.duplex == DUPLEX_FULL)
5899 new_bmcr |= BMCR_FULLDPLX;
5901 if (new_bmcr != bmcr) {
5902 /* BMCR_SPEED1000 is a reserved bit that needs
5903 * to be set on write.
5905 new_bmcr |= BMCR_SPEED1000;
5907 /* Force a linkdown */
5911 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5912 adv &= ~(ADVERTISE_1000XFULL |
5913 ADVERTISE_1000XHALF |
5915 tg3_writephy(tp, MII_ADVERTISE, adv);
5916 tg3_writephy(tp, MII_BMCR, bmcr |
5920 tg3_carrier_off(tp);
5922 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link status after rewriting BMCR (latched-low again). */
5924 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5927 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5928 bmsr |= BMSR_LSTATUS;
5930 bmsr &= ~BMSR_LSTATUS;
5932 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 if (bmsr & BMSR_LSTATUS) {
5937 current_speed = SPEED_1000;
5938 current_link_up = true;
5939 if (bmcr & BMCR_FULLDPLX)
5940 current_duplex = DUPLEX_FULL;
5942 current_duplex = DUPLEX_HALF;
/* With autoneg on, resolve duplex from the common bits of our
 * advertisement and the partner's; no common bits on non-5780-class
 * means the link came up via parallel detect.
 */
5947 if (bmcr & BMCR_ANENABLE) {
5950 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952 common = local_adv & remote_adv;
5953 if (common & (ADVERTISE_1000XHALF |
5954 ADVERTISE_1000XFULL)) {
5955 if (common & ADVERTISE_1000XFULL)
5956 current_duplex = DUPLEX_FULL;
5958 current_duplex = DUPLEX_HALF;
5960 tp->link_config.rmt_adv =
5961 mii_adv_to_ethtool_adv_x(remote_adv);
5962 } else if (!tg3_flag(tp, 5780_CLASS)) {
5963 /* Link is up via parallel detect */
5965 current_link_up = false;
/* Flow control only applies on a full-duplex link. */
5971 if (current_link_up && current_duplex == DUPLEX_FULL)
5972 tg3_setup_flow_control(tp, local_adv, remote_adv);
5974 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975 if (tp->link_config.active_duplex == DUPLEX_HALF)
5976 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5978 tw32_f(MAC_MODE, tp->mac_mode);
5981 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5983 tp->link_config.active_speed = current_speed;
5984 tp->link_config.active_duplex = current_duplex;
5986 tg3_test_and_report_link_chg(tp, current_link_up);
/* Periodic SerDes parallel-detection poll (called while autonegotiating).
 * If autoneg never completes but we see signal detect without config code
 * words, force 1000FD up by hand; conversely, if config words reappear on
 * a parallel-detected link, turn autoneg back on.
 */
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5992 if (tp->serdes_counter) {
5993 /* Give autoneg time to complete. */
5994 tp->serdes_counter--;
5999 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 tg3_readphy(tp, MII_BMCR, &bmcr);
6003 if (bmcr & BMCR_ANENABLE) {
/* phy1 bit 0x10 = signal detect (shadow reg 0x1f); phy2 bit 0x20 =
 * receiving config code words (expansion interrupt status).
 */
6006 /* Select shadow register 0x1f */
6007 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6010 /* Select expansion interrupt status register */
6011 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012 MII_TG3_DSP_EXP1_INT_STAT);
6013 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017 /* We have signal detect and not receiving
6018 * config code words, link is up by parallel
6022 bmcr &= ~BMCR_ANENABLE;
6023 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024 tg3_writephy(tp, MII_BMCR, bmcr);
6025 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
/* Link previously established via parallel detect: if the partner now
 * sends config code words, re-enable autoneg.
 */
6028 } else if (tp->link_up &&
6029 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 /* Select expansion interrupt status register */
6034 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035 MII_TG3_DSP_EXP1_INT_STAT);
6036 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040 /* Config code words received, turn on autoneg. */
6041 tg3_readphy(tp, MII_BMCR, &bmcr);
6042 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6044 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Common entry point for (re)configuring the link.  Dispatches to the
 * fiber, fiber-MII, or copper setup routine based on phy_flags, then
 * applies post-setup fixups: 5784_AX MAC clock prescaler, TX IPG slot
 * time for half-duplex gigabit, statistics coalescing ticks, and the
 * ASPM power-management threshold workaround.  Returns the setup
 * routine's error code.
 */
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6055 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056 err = tg3_setup_fiber_phy(tp, force_reset);
6057 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6060 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784_AX: pick a GRC prescaler matching the current MAC clock rate. */
6062 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6068 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6073 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075 tw32(GRC_MISC_CFG, val);
6078 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079 (6 << TX_LENGTHS_IPG_SHIFT);
6080 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081 tg3_asic_rev(tp) == ASIC_REV_5762)
6082 val |= tr32(MAC_TX_LENGTHS) &
6083 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* Half-duplex gigabit requires the larger 0xff slot time. */
6086 if (tp->link_config.active_speed == SPEED_1000 &&
6087 tp->link_config.active_duplex == DUPLEX_HALF)
6088 tw32(MAC_TX_LENGTHS, val |
6089 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6091 tw32(MAC_TX_LENGTHS, val |
6092 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6094 if (!tg3_flag(tp, 5705_PLUS)) {
6096 tw32(HOSTCC_STAT_COAL_TICKS,
6097 tp->coal.stats_block_coalesce_usecs);
6099 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6103 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104 val = tr32(PCIE_PWR_MGMT_THRESH);
6106 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110 tw32(PCIE_PWR_MGMT_THRESH, val);
6116 /* tp->lock must be held */
/* Read the 64-bit EAV reference clock (PTP hardware counter), LSB first.
 * The ptp_read_system_prets/postts calls bracket the LSB read so PHC
 * gettimex can correlate it with system time.
 */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6121 ptp_read_system_prets(sts);
6122 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123 ptp_read_system_postts(sts);
6124 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6129 /* tp->lock must be held */
/* Load a new 64-bit value into the EAV reference clock: stop the counter,
 * write LSB then MSB, then resume (the final write is flushed via tw32_f).
 */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6132 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6134 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
/* Forward declarations: the lock helpers are defined later in the file
 * but needed by the PTP callbacks below.
 */
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
/* ethtool .get_ts_info: report timestamping capabilities.  Software
 * timestamping is always available; hardware RX/TX timestamping and a PHC
 * index are reported only on PTP_CAPABLE devices.
 */
6142 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6144 struct tg3 *tp = netdev_priv(dev);
6146 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6147 SOF_TIMESTAMPING_RX_SOFTWARE |
6148 SOF_TIMESTAMPING_SOFTWARE;
6150 if (tg3_flag(tp, PTP_CAPABLE)) {
6151 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152 SOF_TIMESTAMPING_RX_HARDWARE |
6153 SOF_TIMESTAMPING_RAW_HARDWARE;
6157 info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 info->phc_index = -1;
6161 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6163 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6164 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6165 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6166 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* PHC .adjfine callback: program the hardware frequency correction.
 * diff_by_scaled_ppm() converts the scaled-ppm request into a correction
 * value for the chip's 24-bit accumulator (hence the 1 << 24 base) and
 * reports whether the adjustment is negative.
 */
6170 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6172 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176 /* Frequency adjustment is performed using hardware with a 24 bit
6177 * accumulator and a programmable correction value. On each clk, the
6178 * correction value gets added to the accumulator and when it
6179 * overflows, the time counter is incremented/decremented.
6181 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6183 tg3_full_lock(tp, 0);
6186 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6187 TG3_EAV_REF_CLK_CORRECT_EN |
6188 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6189 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6191 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6193 tg3_full_unlock(tp);
/* PHC .adjtime callback: phase adjustment is kept as a software offset
 * (ptp_adjust) applied on reads, rather than rewriting the hardware
 * counter.  tp->lock (via tg3_full_lock) serializes against readers.
 */
6198 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6200 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6202 tg3_full_lock(tp, 0);
6203 tp->ptp_adjust += delta;
6204 tg3_full_unlock(tp);
/* PHC .gettimex64 callback: hardware counter plus the software phase
 * offset, converted to a timespec64.  @sts receives the bracketing system
 * timestamps captured inside tg3_refclk_read().
 */
6209 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6210 struct ptp_system_timestamp *sts)
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 tg3_full_lock(tp, 0);
6216 ns = tg3_refclk_read(tp, sts);
6217 ns += tp->ptp_adjust;
6218 tg3_full_unlock(tp);
6220 *ts = ns_to_timespec64(ns);
/* PHC .settime64 callback: write the absolute time straight into the
 * hardware counter (unlike adjtime, which only adjusts the soft offset).
 */
6225 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6226 const struct timespec64 *ts)
6229 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6231 ns = timespec64_to_ns(ts);
6233 tg3_full_lock(tp, 0);
6234 tg3_refclk_write(tp, ns);
6236 tg3_full_unlock(tp);
/* PHC .enable callback.  Only periodic-output requests on index 0 are
 * handled, and the hardware supports just a one-shot pulse: the start
 * time is armed in the EAV watchdog0 registers and the period must be 0.
 */
6241 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6242 struct ptp_clock_request *rq, int on)
6244 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6249 case PTP_CLK_REQ_PEROUT:
6250 /* Reject requests with unsupported flags */
6251 if (rq->perout.flags)
6254 if (rq->perout.index != 0)
6257 tg3_full_lock(tp, 0);
6258 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6259 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6264 nsec = rq->perout.start.sec * 1000000000ULL +
6265 rq->perout.start.nsec;
6267 if (rq->perout.period.sec || rq->perout.period.nsec) {
6268 netdev_warn(tp->dev,
6269 "Device supports only a one-shot timesync output, period must be 0\n");
/* The watchdog start register is 63 bits wide (bit 63 is the enable). */
6274 if (nsec & (1ULL << 63)) {
6275 netdev_warn(tp->dev,
6276 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6281 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6282 tw32(TG3_EAV_WATCHDOG0_MSB,
6283 TG3_EAV_WATCHDOG0_EN |
6284 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6286 tw32(TG3_EAV_REF_CLCK_CTL,
6287 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
/* "off" path: disarm the watchdog and restore the clock control. */
6289 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6290 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6294 tg3_full_unlock(tp);
/* Convert a raw hardware clock value into a zeroed skb_shared_hwtstamps,
 * masking with TG3_TSTAMP_MASK before converting to ktime.
 */
6304 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6305 struct skb_shared_hwtstamps *timestamp)
6307 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6308 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
/* Read the 64-bit TX hardware timestamp register pair into *hwclock. */
6312 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6314 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6315 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
/* PTP aux worker: poll for a deferred TX hardware timestamp.  When the
 * timestamp register has advanced past the value latched at transmit time
 * (pre_tx_ts), deliver it to the stashed skb; otherwise retry, giving up
 * after a couple of attempts (ptp_txts_retrycnt).  The skb is consumed
 * and the retry state reset once we are done with it.
 */
6318 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6320 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6321 struct skb_shared_hwtstamps timestamp;
6324 if (tp->ptp_txts_retrycnt > 2)
6327 tg3_read_tx_tstamp(tp, &hwclock);
6329 if (hwclock != tp->pre_tx_ts) {
6330 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6331 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6334 tp->ptp_txts_retrycnt++;
6337 dev_consume_skb_any(tp->tx_tstamp_skb);
6338 tp->tx_tstamp_skb = NULL;
6339 tp->ptp_txts_retrycnt = 0;
/* Template ptp_clock_info for the tg3 PHC; copied into tp->ptp_info at
 * init time (see tg3_ptp_init).  max_adj is the largest frequency
 * adjustment, in scaled ppb, accepted by tg3_ptp_adjfine.
 */
6344 static const struct ptp_clock_info tg3_ptp_caps = {
6345 .owner = THIS_MODULE,
6346 .name = "tg3 clock",
6347 .max_adj = 250000000,
6353 .adjfine = tg3_ptp_adjfine,
6354 .adjtime = tg3_ptp_adjtime,
6355 .do_aux_work = tg3_ptp_ts_aux_work,
6356 .gettimex64 = tg3_ptp_gettimex,
6357 .settime64 = tg3_ptp_settime,
6358 .enable = tg3_ptp_enable,
6361 /* tp->lock must be held */
/* Seed the hardware clock from system wall time and install the PHC
 * callback table.  No-op on devices without PTP support.
 */
6362 static void tg3_ptp_init(struct tg3 *tp)
6364 if (!tg3_flag(tp, PTP_CAPABLE))
6367 /* Initialize the hardware clock to the system time. */
6368 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6370 tp->ptp_info = tg3_ptp_caps;
6373 /* tp->lock must be held */
/* After resume, reload the hardware clock from system time plus the
 * accumulated software phase offset.
 */
6374 static void tg3_ptp_resume(struct tg3 *tp)
6376 if (!tg3_flag(tp, PTP_CAPABLE))
6379 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
/* Tear down the PHC: unregister the clock and release any TX-timestamp
 * skb still waiting on the aux worker (dev_consume_skb_any is NULL-safe).
 */
6383 static void tg3_ptp_fini(struct tg3 *tp)
6385 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6388 ptp_clock_unregister(tp->ptp_clock);
6389 tp->ptp_clock = NULL;
6391 dev_consume_skb_any(tp->tx_tstamp_skb);
6392 tp->tx_tstamp_skb = NULL;
/* Nonzero while interrupts are being synchronized/quiesced. */
6395 static inline int tg3_irq_sync(struct tg3 *tp)
6397 return tp->irq_sync;
/* Copy @len bytes of register space starting at @off into the dump
 * buffer.  Note @dst is first advanced by @off so each register lands at
 * its own offset within the caller's buffer.
 */
6400 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6404 dst = (u32 *)((u8 *)dst + off);
6405 for (i = 0; i < len; i += sizeof(u32))
6406 *dst++ = tr32(off + i);
/* Snapshot all readable register blocks of a non-PCIe (legacy) device
 * into @regs for the debug dump.  Each tg3_rd32_loop() call covers one
 * functional block (mailboxes, MAC, send/receive engines, DMA, CPUs,
 * etc.); conditional blocks depend on device capabilities.
 */
6409 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6411 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6412 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6413 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6414 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6415 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6416 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6417 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6418 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6419 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6420 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6421 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6422 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6423 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6424 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6425 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6426 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6427 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6428 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6429 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6431 if (tg3_flag(tp, SUPPORT_MSIX))
6432 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6434 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6435 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6436 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6437 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6438 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6439 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6440 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6441 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* Older (pre-5705) devices have a separate TX CPU to dump as well. */
6443 if (!tg3_flag(tp, 5705_PLUS)) {
6444 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6445 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6446 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6449 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6450 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6451 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6452 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6453 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6455 if (tg3_flag(tp, NVRAM))
6456 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Emit a debug dump of device state: register contents (non-zero groups
 * of four only, to keep the log readable) plus each NAPI vector's
 * hardware status block and software ring indices.  Skips the register
 * dump entirely when the PCI channel is in an error state, since reads
 * would return all-ones.
 */
6459 static void tg3_dump_state(struct tg3 *tp)
6464 /* If it is a PCI error, all registers will be 0xffff,
6465 * we don't dump them out, just report the error and return
6467 if (tp->pdev->error_state != pci_channel_io_normal) {
6468 netdev_err(tp->dev, "PCI channel ERROR!\n");
/* GFP_ATOMIC: this can be called from contexts that cannot sleep. */
6472 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6476 if (tg3_flag(tp, PCI_EXPRESS)) {
6477 /* Read up to but not including private PCI registers */
6478 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6479 regs[i / sizeof(u32)] = tr32(i);
6481 tg3_dump_legacy_regs(tp, regs);
/* Print four registers per line; suppress all-zero groups. */
6483 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6484 if (!regs[i + 0] && !regs[i + 1] &&
6485 !regs[i + 2] && !regs[i + 3])
6488 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6490 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6495 for (i = 0; i < tp->irq_cnt; i++) {
6496 struct tg3_napi *tnapi = &tp->napi[i];
6498 /* SW status block */
6500 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6502 tnapi->hw_status->status,
6503 tnapi->hw_status->status_tag,
6504 tnapi->hw_status->rx_jumbo_consumer,
6505 tnapi->hw_status->rx_consumer,
6506 tnapi->hw_status->rx_mini_consumer,
6507 tnapi->hw_status->idx[0].rx_producer,
6508 tnapi->hw_status->idx[0].tx_consumer);
6511 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6513 tnapi->last_tag, tnapi->last_irq_tag,
6514 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6516 tnapi->prodring.rx_std_prod_idx,
6517 tnapi->prodring.rx_std_cons_idx,
6518 tnapi->prodring.rx_jmb_prod_idx,
6519 tnapi->prodring.rx_jmb_cons_idx);
6523 /* This is called whenever we suspect that the system chipset is re-
6524 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6525 * is bogus tx completions. We try to recover by setting the
6526 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6529 static void tg3_tx_recover(struct tg3 *tp)
/* Sanity check: this path must not trigger when the reorder workaround
 * (or indirect mailbox writes) is already active.
 */
6531 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6532 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6534 netdev_warn(tp->dev,
6535 "The system may be re-ordering memory-mapped I/O "
6536 "cycles to the network device, attempting to recover. "
6537 "Please report the problem to the driver maintainer "
6538 "and include system chipset information.\n");
/* The actual reset happens later; here we only flag it as pending. */
6540 tg3_flag_set(tp, TX_RECOVERY_PENDING);
/* Number of free TX descriptors on this ring.  The subtraction is masked
 * with (TG3_TX_RING_SIZE - 1) so it stays correct across index wraparound.
 */
6543 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6545 /* Tell compiler to fetch tx indices from memory. */
6547 return tnapi->tx_pending -
6548 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6551 /* Tigon3 never reports partial packet sends. So we do not
6552 * need special logic to handle SKBs that have not had all
6553 * of their frags sent yet, like SunGEM does.
/* TX completion: walk the ring from tx_cons to the hardware's consumer
 * index, unmapping and freeing each completed skb (head fragment plus
 * page frags), handling deferred hardware TX timestamps, updating BQL
 * accounting, and waking the queue if it was stopped and enough
 * descriptors are now free.
 */
6555 static void tg3_tx(struct tg3_napi *tnapi)
6557 struct tg3 *tp = tnapi->tp;
6558 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6559 u32 sw_idx = tnapi->tx_cons;
6560 struct netdev_queue *txq;
6561 int index = tnapi - tp->napi;
6562 unsigned int pkts_compl = 0, bytes_compl = 0;
6564 if (tg3_flag(tp, ENABLE_TSS))
6567 txq = netdev_get_tx_queue(tp->dev, index);
6569 while (sw_idx != hw_idx) {
6570 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6571 bool complete_skb_later = false;
6572 struct sk_buff *skb = ri->skb;
/* A NULL skb at a completed slot indicates ring corruption. */
6575 if (unlikely(skb == NULL)) {
/* Hardware TX timestamp requested for this packet: deliver it now if
 * the timestamp register has ticked past the latched pre-TX value,
 * otherwise stash the skb and let the PTP aux worker retry.
 */
6580 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6581 struct skb_shared_hwtstamps timestamp;
6584 tg3_read_tx_tstamp(tp, &hwclock);
6585 if (hwclock != tp->pre_tx_ts) {
6586 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6587 skb_tstamp_tx(skb, &timestamp);
6590 tp->tx_tstamp_skb = skb;
6591 complete_skb_later = true;
6595 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6596 skb_headlen(skb), DMA_TO_DEVICE);
/* Skip over descriptor slots consumed by a fragmented mapping. */
6600 while (ri->fragmented) {
6601 ri->fragmented = false;
6602 sw_idx = NEXT_TX(sw_idx);
6603 ri = &tnapi->tx_buffers[sw_idx];
6606 sw_idx = NEXT_TX(sw_idx);
6608 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6609 ri = &tnapi->tx_buffers[sw_idx];
6610 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6613 dma_unmap_page(&tp->pdev->dev,
6614 dma_unmap_addr(ri, mapping),
6615 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6618 while (ri->fragmented) {
6619 ri->fragmented = false;
6620 sw_idx = NEXT_TX(sw_idx);
6621 ri = &tnapi->tx_buffers[sw_idx];
6624 sw_idx = NEXT_TX(sw_idx);
6628 bytes_compl += skb->len;
/* complete_skb_later means ownership moved to tx_tstamp_skb; schedule
 * the PTP worker instead of freeing here.
 */
6630 if (!complete_skb_later)
6631 dev_consume_skb_any(skb);
6633 ptp_schedule_worker(tp->ptp_clock, 0);
6635 if (unlikely(tx_bug)) {
6641 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6643 tnapi->tx_cons = sw_idx;
6645 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6646 * before checking for netif_queue_stopped(). Without the
6647 * memory barrier, there is a small possibility that __tg3_start_xmit()
6648 * will miss it and cause the queue to be stopped forever.
/* Re-check under the tx lock to avoid racing a concurrent stop. */
6652 if (unlikely(netif_tx_queue_stopped(txq) &&
6653 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6654 __netif_tx_lock(txq, smp_processor_id());
6655 if (netif_tx_queue_stopped(txq) &&
6656 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6657 netif_tx_wake_queue(txq);
6658 __netif_tx_unlock(txq);
/* Free an RX data buffer; page-fragment allocations (is_frag) must go
 * back through skb_free_frag() rather than kfree().
 */
6662 static void tg3_frag_free(bool is_frag, void *data)
6665 skb_free_frag(data);
/* Unmap and free one RX ring buffer.  skb_size recomputes the allocation
 * size exactly as tg3_alloc_rx_data() did, so the frag-vs-kmalloc test
 * (skb_size <= PAGE_SIZE) matches the original allocation path.
 */
6670 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6672 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6673 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6678 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6680 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6685 /* Returns size of skb allocated or < 0 on error.
6687 * We only need to fill in the address because the other members
6688 * of the RX descriptor are invariant, see tg3_init_rings.
6690 * Note the purposeful assymetry of cpu vs. chip accesses. For
6691 * posting buffers we only dirty the first cache line of the RX
6692 * descriptor (containing the address). Whereas for the RX status
6693 * buffers the cpu only reads the last cacheline of the RX descriptor
6694 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6696 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6697 u32 opaque_key, u32 dest_idx_unmasked,
6698 unsigned int *frag_size)
6700 struct tg3_rx_buffer_desc *desc;
6701 struct ring_info *map;
6704 int skb_size, data_size, dest_idx;
/* Pick the descriptor, bookkeeping slot, and buffer size for the target
 * ring (standard vs jumbo), identified by the opaque cookie.
 */
6706 switch (opaque_key) {
6707 case RXD_OPAQUE_RING_STD:
6708 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6709 desc = &tpr->rx_std[dest_idx];
6710 map = &tpr->rx_std_buffers[dest_idx];
6711 data_size = tp->rx_pkt_map_sz;
6714 case RXD_OPAQUE_RING_JUMBO:
6715 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6716 desc = &tpr->rx_jmb[dest_idx].std;
6717 map = &tpr->rx_jmb_buffers[dest_idx];
6718 data_size = TG3_RX_JMB_MAP_SZ;
6725 /* Do not overwrite any of the map or rp information
6726 * until we are sure we can commit to a new buffer.
6728 * Callers depend upon this behavior and assume that
6729 * we leave everything unchanged if we fail.
/* Buffers small enough for a single page come from the per-CPU page-frag
 * cache; larger ones fall back to kmalloc.  tg3_rx_data_free() mirrors
 * this size test when freeing.
 */
6731 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6732 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6733 if (skb_size <= PAGE_SIZE) {
6734 data = napi_alloc_frag(skb_size);
6735 *frag_size = skb_size;
6737 data = kmalloc(skb_size, GFP_ATOMIC);
6743 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6744 data_size, DMA_FROM_DEVICE);
/* On mapping failure, release the buffer and leave the slot untouched
 * (see the commit-only-on-success contract above).
 */
6745 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6746 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6751 dma_unmap_addr_set(map, mapping, mapping);
6753 desc->addr_hi = ((u64)mapping >> 32);
6754 desc->addr_lo = ((u64)mapping & 0xffffffff);
6759 /* We only need to move over in the address because the other
6760 * members of the RX descriptor are invariant. See notes above
6761 * tg3_alloc_rx_data for full details.
/* Recycle a still-mapped RX buffer from the source (napi[0]) producer
 * ring into the destination ring slot instead of allocating a new one —
 * used on RX error paths and when skb allocation fails.
 */
6763 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6764 struct tg3_rx_prodring_set *dpr,
6765 u32 opaque_key, int src_idx,
6766 u32 dest_idx_unmasked)
6768 struct tg3 *tp = tnapi->tp;
6769 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6770 struct ring_info *src_map, *dest_map;
6771 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6774 switch (opaque_key) {
6775 case RXD_OPAQUE_RING_STD:
6776 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6777 dest_desc = &dpr->rx_std[dest_idx];
6778 dest_map = &dpr->rx_std_buffers[dest_idx];
6779 src_desc = &spr->rx_std[src_idx];
6780 src_map = &spr->rx_std_buffers[src_idx];
6783 case RXD_OPAQUE_RING_JUMBO:
6784 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6785 dest_desc = &dpr->rx_jmb[dest_idx].std;
6786 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6787 src_desc = &spr->rx_jmb[src_idx].std;
6788 src_map = &spr->rx_jmb_buffers[src_idx];
/* Transfer buffer pointer, DMA cookie and descriptor address words. */
6795 dest_map->data = src_map->data;
6796 dma_unmap_addr_set(dest_map, mapping,
6797 dma_unmap_addr(src_map, mapping));
6798 dest_desc->addr_hi = src_desc->addr_hi;
6799 dest_desc->addr_lo = src_desc->addr_lo;
6801 /* Ensure that the update to the skb happens after the physical
6802 * addresses have been transferred to the new BD location.
/* Clearing src_map->data marks the source slot empty so the prodring
 * transfer logic (tg3_rx_prodring_xfer) can refill it.
 */
6806 src_map->data = NULL;
6809 /* The RX ring scheme is composed of multiple rings which post fresh
6810 * buffers to the chip, and one special ring the chip uses to report
6811 * status back to the host.
6813 * The special ring reports the status of received packets to the
6814 * host. The chip does not write into the original descriptor the
6815 * RX buffer was obtained from. The chip simply takes the original
6816 * descriptor as provided by the host, updates the status and length
6817 * field, then writes this into the next status ring entry.
6819 * Each ring the host uses to post buffers to the chip is described
6820 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6821 * it is first placed into the on-chip ram. When the packet's length
6822 * is known, it walks down the TG3_BDINFO entries to select the ring.
6823 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6824 * which is within the range of the new packet's length is chosen.
6826 * The "separate ring for rx status" scheme may sound queer, but it makes
6827 * sense from a cache coherency perspective. If only the host writes
6828 * to the buffer post rings, and only the chip writes to the rx status
6829 * rings, then cache lines never move beyond shared-modified state.
6830 * If both the host and chip were to write into the same ring, cache line
6831 * eviction could occur since both entities want it in an exclusive state.
/* Main NAPI RX processing loop: consume up to @budget entries from this
 * vector's RX return ring, hand skbs to the stack via GRO, and repost
 * producer buffers.  Returns the number of packets received.
 */
6833 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6835 struct tg3 *tp = tnapi->tp;
6836 u32 work_mask, rx_std_posted = 0;
6837 u32 std_prod_idx, jmb_prod_idx;
6838 u32 sw_idx = tnapi->rx_rcb_ptr;
6841 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
/* hw_idx is the hardware's producer index into the RX return ring. */
6843 hw_idx = *(tnapi->rx_rcb_prod_idx);
6845 * We need to order the read of hw_idx and the read of
6846 * the opaque cookie.
6851 std_prod_idx = tpr->rx_std_prod_idx;
6852 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6853 while (sw_idx != hw_idx && budget > 0) {
6854 struct ring_info *ri;
6855 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6857 struct sk_buff *skb;
6858 dma_addr_t dma_addr;
6859 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie tells us which producer ring (std/jumbo) and
 * which slot the buffer for this completion came from.
 */
6863 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6864 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6865 if (opaque_key == RXD_OPAQUE_RING_STD) {
6866 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6867 dma_addr = dma_unmap_addr(ri, mapping);
6869 post_ptr = &std_prod_idx;
6871 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6872 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6873 dma_addr = dma_unmap_addr(ri, mapping);
6875 post_ptr = &jmb_prod_idx;
6877 goto next_pkt_nopost;
6879 work_mask |= opaque_key;
/* Hardware-reported errors: recycle the buffer back to the
 * producer ring and count the drop.
 */
6881 if (desc->err_vlan & RXD_ERR_MASK) {
6883 tg3_recycle_rx(tnapi, tpr, opaque_key,
6884 desc_idx, *post_ptr);
6886 /* Other statistics kept track of by card. */
6887 tnapi->rx_dropped++;
6891 prefetch(data + TG3_RX_OFFSET(tp));
6892 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* Harvest the hardware RX timestamp for PTP v1/v2 frames. */
6895 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6896 RXD_FLAG_PTPSTAT_PTPV1 ||
6897 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6898 RXD_FLAG_PTPSTAT_PTPV2) {
6899 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6900 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
/* Large packets: post a fresh buffer and hand the received one
 * to the stack zero-copy via build_skb().
 */
6903 if (len > TG3_RX_COPY_THRESH(tp)) {
6905 unsigned int frag_size;
6907 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6908 *post_ptr, &frag_size);
6912 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6915 /* Ensure that the update to the data happens
6916 * after the usage of the old DMA mapping.
6923 skb = build_skb(data, frag_size);
6925 skb = slab_build_skb(data);
6927 tg3_frag_free(frag_size != 0, data);
6928 goto drop_it_no_recycle;
6930 skb_reserve(skb, TG3_RX_OFFSET(tp));
/* Small packets: copy into a freshly allocated skb and recycle
 * the original buffer back to the producer ring.
 */
6932 tg3_recycle_rx(tnapi, tpr, opaque_key,
6933 desc_idx, *post_ptr);
6935 skb = netdev_alloc_skb(tp->dev,
6936 len + TG3_RAW_IP_ALIGN);
6938 goto drop_it_no_recycle;
6940 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6941 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6944 data + TG3_RX_OFFSET(tp),
6946 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6947 len, DMA_FROM_DEVICE);
6952 tg3_hwclock_to_timestamp(tp, tstamp,
6953 skb_hwtstamps(skb));
/* Trust hardware checksum only when RXCSUM is enabled and the
 * chip reports a fully verified TCP/UDP checksum (0xffff).
 */
6955 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6956 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6957 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6958 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6959 skb->ip_summed = CHECKSUM_UNNECESSARY;
6961 skb_checksum_none_assert(skb);
6963 skb->protocol = eth_type_trans(skb, tp->dev);
/* Drop over-MTU frames, except VLAN-tagged ones whose extra
 * header length legitimately exceeds the MTU check.
 */
6965 if (len > (tp->dev->mtu + ETH_HLEN) &&
6966 skb->protocol != htons(ETH_P_8021Q) &&
6967 skb->protocol != htons(ETH_P_8021AD)) {
6968 dev_kfree_skb_any(skb);
6969 goto drop_it_no_recycle;
6972 if (desc->type_flags & RXD_FLAG_VLAN &&
6973 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6974 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6975 desc->err_vlan & RXD_VLAN_MASK);
6977 napi_gro_receive(&tnapi->napi, skb);
/* Periodically kick the std producer mailbox mid-loop so the
 * chip never runs dry of RX buffers on long bursts.
 */
6985 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6986 tpr->rx_std_prod_idx = std_prod_idx &
6987 tp->rx_std_ring_mask;
6988 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6989 tpr->rx_std_prod_idx);
6990 work_mask &= ~RXD_OPAQUE_RING_STD;
6995 sw_idx &= tp->rx_ret_ring_mask;
6997 /* Refresh hw_idx to see if there is new work */
6998 if (sw_idx == hw_idx) {
6999 hw_idx = *(tnapi->rx_rcb_prod_idx);
7004 /* ACK the status ring. */
7005 tnapi->rx_rcb_ptr = sw_idx;
7006 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7008 /* Refill RX ring(s). */
7009 if (!tg3_flag(tp, ENABLE_RSS)) {
7010 /* Sync BD data before updating mailbox */
7013 if (work_mask & RXD_OPAQUE_RING_STD) {
7014 tpr->rx_std_prod_idx = std_prod_idx &
7015 tp->rx_std_ring_mask;
7016 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7017 tpr->rx_std_prod_idx);
7019 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7020 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7021 tp->rx_jmb_ring_mask;
7022 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7023 tpr->rx_jmb_prod_idx);
7025 } else if (work_mask) {
7026 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7027 * updated before the producer indices can be updated.
7031 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7032 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
/* Under RSS, napi[1] owns the real hardware producer rings;
 * schedule it to pull our refilled buffers across.
 */
7034 if (tnapi != &tp->napi[1]) {
7035 tp->rx_refill = true;
7036 napi_schedule(&tp->napi[1].napi);
/* Check the status block for a link-change event and, if one is pending,
 * clear the event and re-run PHY setup under tp->lock.  Skipped entirely
 * when the driver polls the link via register reads or SERDES polling.
 */
7043 static void tg3_poll_link(struct tg3 *tp)
7045 /* handle link change and other phy events */
7046 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7047 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7049 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Acknowledge the link-change bit while preserving the rest. */
7050 sblk->status = SD_STATUS_UPDATED |
7051 (sblk->status & ~SD_STATUS_LINK_CHG);
7052 spin_lock(&tp->lock);
7053 if (tg3_flag(tp, USE_PHYLIB)) {
7055 (MAC_STATUS_SYNC_CHANGED |
7056 MAC_STATUS_CFG_CHANGED |
7057 MAC_STATUS_MI_COMPLETION |
7058 MAC_STATUS_LNKSTATE_CHANGED));
7061 tg3_setup_phy(tp, false);
7062 spin_unlock(&tp->lock);
/* Move recycled RX buffers from a per-vector source producer ring (@spr)
 * into the destination ring (@dpr) that feeds the hardware.  Used in RSS
 * mode where only napi[0]'s prodring is programmed into the chip.
 * Handles the standard ring first, then the jumbo ring, copying in
 * contiguous runs bounded by ring wrap and by occupied destination slots.
 */
7067 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7068 struct tg3_rx_prodring_set *dpr,
7069 struct tg3_rx_prodring_set *spr)
7071 u32 si, di, cpycnt, src_prod_idx;
7075 src_prod_idx = spr->rx_std_prod_idx;
7077 /* Make sure updates to the rx_std_buffers[] entries and the
7078 * standard producer index are seen in the correct order.
7082 if (spr->rx_std_cons_idx == src_prod_idx)
/* Copy length: up to the producer, or to the end of the ring if the
 * producer has wrapped; further clamped to the destination's room.
 */
7085 if (spr->rx_std_cons_idx < src_prod_idx)
7086 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7088 cpycnt = tp->rx_std_ring_mask + 1 -
7089 spr->rx_std_cons_idx;
7091 cpycnt = min(cpycnt,
7092 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7094 si = spr->rx_std_cons_idx;
7095 di = dpr->rx_std_prod_idx;
/* Stop the run early at the first destination slot still holding a
 * buffer — those cannot be overwritten.
 */
7097 for (i = di; i < di + cpycnt; i++) {
7098 if (dpr->rx_std_buffers[i].data) {
7108 /* Ensure that updates to the rx_std_buffers ring and the
7109 * shadowed hardware producer ring from tg3_recycle_skb() are
7110 * ordered correctly WRT the skb check above.
7114 memcpy(&dpr->rx_std_buffers[di],
7115 &spr->rx_std_buffers[si],
7116 cpycnt * sizeof(struct ring_info));
7118 for (i = 0; i < cpycnt; i++, di++, si++) {
7119 struct tg3_rx_buffer_desc *sbd, *dbd;
7120 sbd = &spr->rx_std[si];
7121 dbd = &dpr->rx_std[di];
7122 dbd->addr_hi = sbd->addr_hi;
7123 dbd->addr_lo = sbd->addr_lo;
7126 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7127 tp->rx_std_ring_mask;
7128 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7129 tp->rx_std_ring_mask;
/* Same transfer logic, now for the jumbo producer ring. */
7133 src_prod_idx = spr->rx_jmb_prod_idx;
7135 /* Make sure updates to the rx_jmb_buffers[] entries and
7136 * the jumbo producer index are seen in the correct order.
7140 if (spr->rx_jmb_cons_idx == src_prod_idx)
7143 if (spr->rx_jmb_cons_idx < src_prod_idx)
7144 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7146 cpycnt = tp->rx_jmb_ring_mask + 1 -
7147 spr->rx_jmb_cons_idx;
7149 cpycnt = min(cpycnt,
7150 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7152 si = spr->rx_jmb_cons_idx;
7153 di = dpr->rx_jmb_prod_idx;
7155 for (i = di; i < di + cpycnt; i++) {
7156 if (dpr->rx_jmb_buffers[i].data) {
7166 /* Ensure that updates to the rx_jmb_buffers ring and the
7167 * shadowed hardware producer ring from tg3_recycle_skb() are
7168 * ordered correctly WRT the skb check above.
7172 memcpy(&dpr->rx_jmb_buffers[di],
7173 &spr->rx_jmb_buffers[si],
7174 cpycnt * sizeof(struct ring_info));
7176 for (i = 0; i < cpycnt; i++, di++, si++) {
7177 struct tg3_rx_buffer_desc *sbd, *dbd;
7178 sbd = &spr->rx_jmb[si].std;
7179 dbd = &dpr->rx_jmb[di].std;
7180 dbd->addr_hi = sbd->addr_hi;
7181 dbd->addr_lo = sbd->addr_lo;
7184 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7185 tp->rx_jmb_ring_mask;
7186 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7187 tp->rx_jmb_ring_mask;
/* Core per-vector poll work shared by tg3_poll() and tg3_poll_msix():
 * reap TX completions, run the RX loop within the remaining budget, and
 * (for the RSS aggregator vector, napi[1]) pull recycled buffers from
 * all RX queues into napi[0]'s prodring and kick the mailboxes.
 * Returns the updated work_done count.
 */
7193 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7195 struct tg3 *tp = tnapi->tp;
7197 /* run TX completion thread */
7198 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7200 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vectors without an RX return ring have nothing more to do. */
7204 if (!tnapi->rx_rcb_prod_idx)
7207 /* run RX thread, within the bounds set by NAPI.
7208 * All RX "locking" is done by ensuring outside
7209 * code synchronizes with tg3->napi.poll()
7211 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7212 work_done += tg3_rx(tnapi, budget - work_done);
7214 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7215 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7217 u32 std_prod_idx = dpr->rx_std_prod_idx;
7218 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7220 tp->rx_refill = false;
7221 for (i = 1; i <= tp->rxq_cnt; i++)
7222 err |= tg3_rx_prodring_xfer(tp, dpr,
7223 &tp->napi[i].prodring);
/* Only touch the hardware mailboxes if a transfer actually
 * advanced a producer index.
 */
7227 if (std_prod_idx != dpr->rx_std_prod_idx)
7228 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7229 dpr->rx_std_prod_idx);
7231 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7232 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7233 dpr->rx_jmb_prod_idx);
7236 tw32_f(HOSTCC_MODE, tp->coal_now);
/* Schedule the chip-reset workqueue item exactly once: the atomic
 * test-and-set on RESET_TASK_PENDING prevents double-scheduling.
 */
7242 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7244 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7245 schedule_work(&tp->reset_task);
/* Cancel a pending reset task (waiting for it if already running) and
 * clear the TX-recovery flag so normal TX processing can resume.
 */
7248 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7250 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7251 cancel_work_sync(&tp->reset_task);
7252 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors (tagged-status mode).  Loops over
 * tg3_poll_work() until the budget is exhausted or no work remains, then
 * acknowledges the last status tag and re-enables the vector's interrupt.
 * On a pending TX recovery error, schedules a chip reset instead.
 */
7255 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7257 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7258 struct tg3 *tp = tnapi->tp;
7260 struct tg3_hw_status *sblk = tnapi->hw_status;
7263 work_done = tg3_poll_work(tnapi, work_done, budget);
7265 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7268 if (unlikely(work_done >= budget))
7271 /* tp->last_tag is used in tg3_int_reenable() below
7272 * to tell the hw how much work has been processed,
7273 * so we must read it before checking for more work.
7275 tnapi->last_tag = sblk->status_tag;
7276 tnapi->last_irq_tag = tnapi->last_tag;
7279 /* check for RX/TX work to do */
7280 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7281 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7283 /* This test here is not race free, but will reduce
7284 * the number of interrupts by looping again.
7286 if (tnapi == &tp->napi[1] && tp->rx_refill)
7289 napi_complete_done(napi, work_done);
7290 /* Reenable interrupts. */
/* Writing last_tag<<24 to the mailbox both ACKs processed work
 * and unmasks this vector.
 */
7291 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7293 /* This test here is synchronized by napi_schedule()
7294 * and napi_complete() to close the race condition.
7296 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7297 tw32(HOSTCC_MODE, tp->coalesce_mode |
7298 HOSTCC_MODE_ENABLE |
7305 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7309 /* work_done is guaranteed to be less than budget. */
7310 napi_complete(napi);
7311 tg3_reset_task_schedule(tp);
/* Inspect hardware error status registers (flow attention, MSI status,
 * read/write DMAC status) after the status block reported SD_STATUS_ERROR.
 * Logs the cause, marks the error as processed so it is handled only once,
 * and schedules a chip reset.
 */
7315 static void tg3_process_error(struct tg3 *tp)
7318 bool real_error = false;
7320 if (tg3_flag(tp, ERROR_PROCESSED))
7323 /* Check Flow Attention register */
7324 val = tr32(HOSTCC_FLOW_ATTN);
/* MBUF low-watermark attention alone is not treated as fatal. */
7325 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7326 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7330 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7331 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7335 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7336 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7345 tg3_flag_set(tp, ERROR_PROCESSED);
7346 tg3_reset_task_schedule(tp);
/* NAPI poll handler for the default (non-MSI-X) interrupt path.  Handles
 * status-block error events, polls link state, runs tg3_poll_work() until
 * idle or out of budget, then completes NAPI and re-enables interrupts.
 * Supports both tagged and non-tagged status-block modes.
 */
7349 static int tg3_poll(struct napi_struct *napi, int budget)
7351 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7352 struct tg3 *tp = tnapi->tp;
7354 struct tg3_hw_status *sblk = tnapi->hw_status;
7357 if (sblk->status & SD_STATUS_ERROR)
7358 tg3_process_error(tp);
7362 work_done = tg3_poll_work(tnapi, work_done, budget);
7364 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7367 if (unlikely(work_done >= budget))
7370 if (tg3_flag(tp, TAGGED_STATUS)) {
7371 /* tp->last_tag is used in tg3_int_reenable() below
7372 * to tell the hw how much work has been processed,
7373 * so we must read it before checking for more work.
7375 tnapi->last_tag = sblk->status_tag;
7376 tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged mode: clear the UPDATED bit to ACK the status block. */
7379 sblk->status &= ~SD_STATUS_UPDATED;
7381 if (likely(!tg3_has_work(tnapi))) {
7382 napi_complete_done(napi, work_done);
7383 tg3_int_reenable(tnapi);
7388 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7392 /* work_done is guaranteed to be less than budget. */
7393 napi_complete(napi);
7394 tg3_reset_task_schedule(tp);
/* Disable all NAPI contexts, highest vector first (reverse of enable). */
7398 static void tg3_napi_disable(struct tg3 *tp)
7402 for (i = tp->irq_cnt - 1; i >= 0; i--)
7403 napi_disable(&tp->napi[i].napi);
/* Enable all NAPI contexts in ascending vector order. */
7406 static void tg3_napi_enable(struct tg3 *tp)
7410 for (i = 0; i < tp->irq_cnt; i++)
7411 napi_enable(&tp->napi[i].napi);
/* Register NAPI poll handlers: vector 0 uses tg3_poll (INTx/MSI path),
 * the remaining vectors use tg3_poll_msix.
 */
7414 static void tg3_napi_init(struct tg3 *tp)
7418 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7419 for (i = 1; i < tp->irq_cnt; i++)
7420 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
/* Unregister every NAPI context added by tg3_napi_init(). */
7423 static void tg3_napi_fini(struct tg3 *tp)
7427 for (i = 0; i < tp->irq_cnt; i++)
7428 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the network interface: refresh the TX trans timestamp so the
 * watchdog does not fire, stop NAPI polling, drop carrier and disable
 * all TX queues.
 */
7431 static inline void tg3_netif_stop(struct tg3 *tp)
7433 netif_trans_update(tp->dev); /* prevent tx timeout */
7434 tg3_napi_disable(tp);
7435 netif_carrier_off(tp->dev);
7436 netif_tx_disable(tp->dev);
7439 /* tp->lock must be held */
/* Counterpart of tg3_netif_stop(): wake all TX queues, restore carrier,
 * re-enable NAPI, and force a status-block update so tg3_enable_ints()
 * immediately processes any work that arrived while stopped.
 */
7440 static inline void tg3_netif_start(struct tg3 *tp)
7444 /* NOTE: unconditional netif_tx_wake_all_queues is only
7445 * appropriate so long as all callers are assured to
7446 * have free tx slots (such as after tg3_init_hw)
7448 netif_tx_wake_all_queues(tp->dev);
7451 netif_carrier_on(tp->dev);
7453 tg3_napi_enable(tp);
7454 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7455 tg3_enable_ints(tp);
/* Synchronize with all in-flight interrupt handlers.  Must be entered
 * with tp->lock held; the lock is dropped around synchronize_irq() (the
 * handlers may need it) and re-taken before returning, as declared by
 * the __releases/__acquires sparse annotations.
 */
7458 static void tg3_irq_quiesce(struct tg3 *tp)
7459 __releases(tp->lock)
7460 __acquires(tp->lock)
7464 BUG_ON(tp->irq_sync);
7469 spin_unlock_bh(&tp->lock);
7471 for (i = 0; i < tp->irq_cnt; i++)
7472 synchronize_irq(tp->napi[i].irq_vec);
7474 spin_lock_bh(&tp->lock);
7477 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7478 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7479 * with as well. Most of the time, this is not necessary except when
7480 * shutting down the device.
7482 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7484 spin_lock_bh(&tp->lock);
7486 tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
7489 static inline void tg3_full_unlock(struct tg3 *tp)
7491 spin_unlock_bh(&tp->lock);
7494 /* One-shot MSI handler - Chip automatically disables interrupt
7495 * after sending MSI so driver doesn't have to do it.
7497 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7499 struct tg3_napi *tnapi = dev_id;
7500 struct tg3 *tp = tnapi->tp;
/* Prefetch status block and next RX return descriptor for the poll. */
7502 prefetch(tnapi->hw_status);
7504 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Skip scheduling while an irq quiesce is in progress. */
7506 if (likely(!tg3_irq_sync(tp)))
7507 napi_schedule(&tnapi->napi);
7512 /* MSI ISR - No need to check for interrupt sharing and no need to
7513 * flush status block and interrupt mailbox. PCI ordering rules
7514 * guarantee that MSI will arrive after the status block.
7516 static irqreturn_t tg3_msi(int irq, void *dev_id)
7518 struct tg3_napi *tnapi = dev_id;
7519 struct tg3 *tp = tnapi->tp;
7521 prefetch(tnapi->hw_status);
7523 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7525 * Writing any value to intr-mbox-0 clears PCI INTA# and
7526 * chip-internal interrupt pending events.
7527 * Writing non-zero to intr-mbox-0 additional tells the
7528 * NIC to stop sending us irqs, engaging "in-intr-handler"
7531 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7532 if (likely(!tg3_irq_sync(tp)))
7533 napi_schedule(&tnapi->napi);
/* MSI is never shared, so the interrupt is always ours. */
7535 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (non-tagged status mode).  Confirms the
 * interrupt is ours via the status block / PCI state register (the line
 * may be shared), masks further chip interrupts through the mailbox, and
 * schedules NAPI if there is work.
 */
7538 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7540 struct tg3_napi *tnapi = dev_id;
7541 struct tg3 *tp = tnapi->tp;
7542 struct tg3_hw_status *sblk = tnapi->hw_status;
7543 unsigned int handled = 1;
7545 /* In INTx mode, it is possible for the interrupt to arrive at
7546 * the CPU before the status block posted prior to the interrupt.
7547 * Reading the PCI State register will confirm whether the
7548 * interrupt is ours and will flush the status block.
7550 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7551 if (tg3_flag(tp, CHIP_RESETTING) ||
7552 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7559 * Writing any value to intr-mbox-0 clears PCI INTA# and
7560 * chip-internal interrupt pending events.
7561 * Writing non-zero to intr-mbox-0 additional tells the
7562 * NIC to stop sending us irqs, engaging "in-intr-handler"
7565 * Flush the mailbox to de-assert the IRQ immediately to prevent
7566 * spurious interrupts. The flush impacts performance but
7567 * excessive spurious interrupts can be worse in some cases.
7569 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7570 if (tg3_irq_sync(tp))
/* ACK the status block before checking for work. */
7572 sblk->status &= ~SD_STATUS_UPDATED;
7573 if (likely(tg3_has_work(tnapi))) {
7574 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7575 napi_schedule(&tnapi->napi);
7577 /* No work, shared interrupt perhaps? re-enable
7578 * interrupts, and flush that PCI write
7580 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7584 return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for tagged-status mode.  An unchanged
 * status tag means the interrupt is not ours (or is screaming); otherwise
 * mask chip interrupts, record the tag, and schedule NAPI.
 */
7587 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7589 struct tg3_napi *tnapi = dev_id;
7590 struct tg3 *tp = tnapi->tp;
7591 struct tg3_hw_status *sblk = tnapi->hw_status;
7592 unsigned int handled = 1;
7594 /* In INTx mode, it is possible for the interrupt to arrive at
7595 * the CPU before the status block posted prior to the interrupt.
7596 * Reading the PCI State register will confirm whether the
7597 * interrupt is ours and will flush the status block.
7599 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7600 if (tg3_flag(tp, CHIP_RESETTING) ||
7601 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7608 * writing any value to intr-mbox-0 clears PCI INTA# and
7609 * chip-internal interrupt pending events.
7610 * writing non-zero to intr-mbox-0 additional tells the
7611 * NIC to stop sending us irqs, engaging "in-intr-handler"
7614 * Flush the mailbox to de-assert the IRQ immediately to prevent
7615 * spurious interrupts. The flush impacts performance but
7616 * excessive spurious interrupts can be worse in some cases.
7618 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7621 * In a shared interrupt configuration, sometimes other devices'
7622 * interrupts will scream. We record the current status tag here
7623 * so that the above check can report that the screaming interrupts
7624 * are unhandled. Eventually they will be silenced.
7626 tnapi->last_irq_tag = sblk->status_tag;
7628 if (tg3_irq_sync(tp))
7631 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7633 napi_schedule(&tnapi->napi);
7636 return IRQ_RETVAL(handled);
7639 /* ISR for interrupt test */
/* Minimal handler used by the self-test: if the status block was updated
 * or the INTx line is asserted, the interrupt is ours — disable chip
 * interrupts and report handled; otherwise report unhandled.
 */
7640 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7642 struct tg3_napi *tnapi = dev_id;
7643 struct tg3 *tp = tnapi->tp;
7644 struct tg3_hw_status *sblk = tnapi->hw_status;
7646 if ((sblk->status & SD_STATUS_UPDATED) ||
7647 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7648 tg3_disable_ints(tp);
7649 return IRQ_RETVAL(1);
7651 return IRQ_RETVAL(0);
7654 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, invoke the INTx handler
 * directly for every vector (skipped during an irq quiesce).
 */
7655 static void tg3_poll_controller(struct net_device *dev)
7658 struct tg3 *tp = netdev_priv(dev);
7660 if (tg3_irq_sync(tp))
7663 for (i = 0; i < tp->irq_cnt; i++)
7664 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* ndo_tx_timeout callback: log the stall (when TX error messages are
 * enabled) and schedule a full chip reset to recover.
 */
7668 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7670 struct tg3 *tp = netdev_priv(dev);
7672 if (netif_msg_tx_err(tp)) {
7673 netdev_err(dev, "transmit timed out, resetting\n");
7677 tg3_reset_task_schedule(tp);
7680 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7681 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7683 u32 base = (u32) mapping & 0xffffffff;
/* 32-bit wraparound of base+len(+8 slack) means the buffer straddles
 * a 4GB boundary, which this hardware cannot DMA across.
 */
7685 return base + len + 8 < base;
7688 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7689 * of any 4GB boundaries: 4G, 8G, etc
7691 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
/* Only the 5762 ASIC with TSO active (mss != 0) needs this check. */
7694 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7695 u32 base = (u32) mapping & 0xffffffff;
7697 return ((base + len + (mss & 0x3fff)) < base);
7702 /* Test for DMA addresses > 40-bit */
7703 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
/* Only relevant on 64-bit HIGHMEM configs where mappings can exceed
 * the 40-bit DMA reach of affected chips (40BIT_DMA_BUG flag).
 */
7706 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7707 if (tg3_flag(tp, 40BIT_DMA_BUG))
7708 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill a TX buffer descriptor: split the 64-bit DMA address into the
 * hi/lo words, pack length with flags, and pack MSS with the VLAN tag
 * into the vlan_tag word.
 */
7715 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7716 dma_addr_t mapping, u32 len, u32 flags,
7719 txbd->addr_hi = ((u64) mapping >> 32);
7720 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7721 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7722 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Emit one TX fragment into the ring at *entry, working around several
 * hardware DMA bugs (short-DMA, 4GB crossing, TSO-near-4GB, 40-bit
 * limit).  When tp->dma_limit is set, oversized fragments are split into
 * multiple chained BDs.  Advances *entry and decrements *budget per BD.
 * Returns true if a hw bug condition requires the caller to fall back to
 * the bounce-buffer workaround path.
 */
7725 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7726 dma_addr_t map, u32 len, u32 flags,
7729 struct tg3 *tp = tnapi->tp;
/* Each of these hardware limitations forces the hwbug path. */
7732 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7735 if (tg3_4g_overflow_test(map, len))
7738 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7741 if (tg3_40bit_overflow_test(tp, map, len))
7744 if (tp->dma_limit) {
7745 u32 prvidx = *entry;
7746 u32 tmp_flag = flags & ~TXD_FLAG_END;
7747 while (len > tp->dma_limit && *budget) {
7748 u32 frag_len = tp->dma_limit;
7749 len -= tp->dma_limit;
7751 /* Avoid the 8byte DMA problem */
7753 len += tp->dma_limit / 2;
7754 frag_len = tp->dma_limit / 2;
7757 tnapi->tx_buffers[*entry].fragmented = true;
7759 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7760 frag_len, tmp_flag, mss, vlan);
7763 *entry = NEXT_TX(*entry);
/* Final (or only) piece of the fragment. */
7770 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7771 len, flags, mss, vlan);
7773 *entry = NEXT_TX(*entry);
/* Ran out of budget mid-split: unmark the last chained BD. */
7776 tnapi->tx_buffers[prvidx].fragmented = false;
7780 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7781 len, flags, mss, vlan);
7782 *entry = NEXT_TX(*entry);
/* Unmap all DMA mappings of one transmitted skb starting at ring slot
 * @entry: the linear head first, then fragments 0..last.  Slots marked
 * 'fragmented' (BDs split by tg3_tx_frag_set for the dma_limit
 * workaround) are skipped over after each unmap.
 */
7788 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7791 struct sk_buff *skb;
7792 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7797 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7798 skb_headlen(skb), DMA_TO_DEVICE);
7800 while (txb->fragmented) {
7801 txb->fragmented = false;
7802 entry = NEXT_TX(entry);
7803 txb = &tnapi->tx_buffers[entry];
7806 for (i = 0; i <= last; i++) {
7807 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7809 entry = NEXT_TX(entry);
7810 txb = &tnapi->tx_buffers[entry];
7812 dma_unmap_page(&tnapi->tp->pdev->dev,
7813 dma_unmap_addr(txb, mapping),
7814 skb_frag_size(frag), DMA_TO_DEVICE);
7816 while (txb->fragmented) {
7817 txb->fragmented = false;
7818 entry = NEXT_TX(entry);
7819 txb = &tnapi->tx_buffers[entry];
7824 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Bounce-buffer fallback: copy the skb into a newly allocated linear skb
 * (with extra headroom alignment on 5701), map it, and queue it via
 * tg3_tx_frag_set().  On success the original skb is consumed and *pskb
 * points at the replacement.
 */
7825 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7826 struct sk_buff **pskb,
7827 u32 *entry, u32 *budget,
7828 u32 base_flags, u32 mss, u32 vlan)
7830 struct tg3 *tp = tnapi->tp;
7831 struct sk_buff *new_skb, *skb = *pskb;
7832 dma_addr_t new_addr = 0;
7835 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7836 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 requires 4-byte-aligned data: copy with adjusted headroom. */
7838 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7840 new_skb = skb_copy_expand(skb,
7841 skb_headroom(skb) + more_headroom,
7842 skb_tailroom(skb), GFP_ATOMIC);
7848 /* New SKB is guaranteed to be linear. */
7849 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7850 new_skb->len, DMA_TO_DEVICE);
7851 /* Make sure the mapping succeeded */
7852 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7853 dev_kfree_skb_any(new_skb);
7856 u32 save_entry = *entry;
7858 base_flags |= TXD_FLAG_END;
7860 tnapi->tx_buffers[*entry].skb = new_skb;
7861 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
/* If even the linear copy still trips a hw bug, give up: unmap
 * from the saved entry and free the copy.
 */
7864 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7865 new_skb->len, base_flags,
7867 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7868 dev_kfree_skb_any(new_skb);
7874 dev_consume_skb_any(skb);
/* Returns true when the GSO fallback (tg3_tso_bug) is viable for this
 * skb, i.e. the segment count fits comfortably within the TX ring.
 */
7879 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7881 /* Check if we will never have enough descriptors,
7882 * as gso_segs can be more than current ring size
7884 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7887 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7889 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7890 * indicated in tg3_tx_frag_set()
7892 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7893 struct netdev_queue *txq, struct sk_buff *skb)
/* Worst-case descriptor estimate: 3 BDs per GSO segment. */
7895 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7896 struct sk_buff *segs, *seg, *next;
7898 /* Estimate the number of fragments in the worst case */
7899 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7900 netif_tx_stop_queue(txq);
7902 /* netif_tx_stop_queue() must be done before checking
7903 * checking tx index in tg3_tx_avail() below, because in
7904 * tg3_tx(), we update tx index before checking for
7905 * netif_tx_queue_stopped().
7908 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7909 return NETDEV_TX_BUSY;
7911 netif_tx_wake_queue(txq);
/* Segment in software with TSO features masked off, then transmit
 * each resulting segment individually.
 */
7914 segs = skb_gso_segment(skb, tp->dev->features &
7915 ~(NETIF_F_TSO | NETIF_F_TSO6));
7916 if (IS_ERR(segs) || !segs) {
7917 tnapi->tx_dropped++;
7918 goto tg3_tso_bug_end;
7921 skb_list_walk_safe(segs, seg, next) {
7922 skb_mark_not_on_list(seg);
7923 __tg3_start_xmit(seg, tp->dev);
7927 dev_consume_skb_any(skb);
7929 return NETDEV_TX_OK;
7932 /* hard_start_xmit for all devices */
7933 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7935 struct tg3 *tp = netdev_priv(dev);
7936 u32 len, entry, base_flags, mss, vlan = 0;
7938 int i = -1, would_hit_hwbug;
7940 struct tg3_napi *tnapi;
7941 struct netdev_queue *txq;
7943 struct iphdr *iph = NULL;
7944 struct tcphdr *tcph = NULL;
7945 __sum16 tcp_csum = 0, ip_csum = 0;
7946 __be16 ip_tot_len = 0;
7948 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7949 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7950 if (tg3_flag(tp, ENABLE_TSS))
7953 budget = tg3_tx_avail(tnapi);
7955 /* We are running in BH disabled context with netif_tx_lock
7956 * and TX reclaim runs via tp->napi.poll inside of a software
7957 * interrupt. Furthermore, IRQ processing runs lockless so we have
7958 * no IRQ context deadlocks to worry about either. Rejoice!
7960 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7961 if (!netif_tx_queue_stopped(txq)) {
7962 netif_tx_stop_queue(txq);
7964 /* This is a hard error, log it. */
7966 "BUG! Tx Ring full when queue awake!\n");
7968 return NETDEV_TX_BUSY;
7971 entry = tnapi->tx_prod;
7974 mss = skb_shinfo(skb)->gso_size;
7976 u32 tcp_opt_len, hdr_len;
7978 if (skb_cow_head(skb, 0))
7982 tcp_opt_len = tcp_optlen(skb);
7984 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7986 /* HW/FW can not correctly segment packets that have been
7987 * vlan encapsulated.
7989 if (skb->protocol == htons(ETH_P_8021Q) ||
7990 skb->protocol == htons(ETH_P_8021AD)) {
7991 if (tg3_tso_bug_gso_check(tnapi, skb))
7992 return tg3_tso_bug(tp, tnapi, txq, skb);
7996 if (!skb_is_gso_v6(skb)) {
7997 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7998 tg3_flag(tp, TSO_BUG)) {
7999 if (tg3_tso_bug_gso_check(tnapi, skb))
8000 return tg3_tso_bug(tp, tnapi, txq, skb);
8003 ip_csum = iph->check;
8004 ip_tot_len = iph->tot_len;
8006 iph->tot_len = htons(mss + hdr_len);
8009 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8010 TXD_FLAG_CPU_POST_DMA);
8012 tcph = tcp_hdr(skb);
8013 tcp_csum = tcph->check;
8015 if (tg3_flag(tp, HW_TSO_1) ||
8016 tg3_flag(tp, HW_TSO_2) ||
8017 tg3_flag(tp, HW_TSO_3)) {
8019 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8021 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8025 if (tg3_flag(tp, HW_TSO_3)) {
8026 mss |= (hdr_len & 0xc) << 12;
8028 base_flags |= 0x00000010;
8029 base_flags |= (hdr_len & 0x3e0) << 5;
8030 } else if (tg3_flag(tp, HW_TSO_2))
8031 mss |= hdr_len << 9;
8032 else if (tg3_flag(tp, HW_TSO_1) ||
8033 tg3_asic_rev(tp) == ASIC_REV_5705) {
8034 if (tcp_opt_len || iph->ihl > 5) {
8037 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8038 mss |= (tsflags << 11);
8041 if (tcp_opt_len || iph->ihl > 5) {
8044 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8045 base_flags |= tsflags << 12;
8048 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8049 /* HW/FW can not correctly checksum packets that have been
8050 * vlan encapsulated.
8052 if (skb->protocol == htons(ETH_P_8021Q) ||
8053 skb->protocol == htons(ETH_P_8021AD)) {
8054 if (skb_checksum_help(skb))
8057 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8061 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8062 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8063 base_flags |= TXD_FLAG_JMB_PKT;
8065 if (skb_vlan_tag_present(skb)) {
8066 base_flags |= TXD_FLAG_VLAN;
8067 vlan = skb_vlan_tag_get(skb);
8070 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8071 tg3_flag(tp, TX_TSTAMP_EN)) {
8072 tg3_full_lock(tp, 0);
8073 if (!tp->pre_tx_ts) {
8074 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8075 base_flags |= TXD_FLAG_HWTSTAMP;
8076 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8078 tg3_full_unlock(tp);
8081 len = skb_headlen(skb);
8083 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8085 if (dma_mapping_error(&tp->pdev->dev, mapping))
8089 tnapi->tx_buffers[entry].skb = skb;
8090 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8092 would_hit_hwbug = 0;
8094 if (tg3_flag(tp, 5701_DMA_BUG))
8095 would_hit_hwbug = 1;
8097 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8098 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8100 would_hit_hwbug = 1;
8101 } else if (skb_shinfo(skb)->nr_frags > 0) {
8104 if (!tg3_flag(tp, HW_TSO_1) &&
8105 !tg3_flag(tp, HW_TSO_2) &&
8106 !tg3_flag(tp, HW_TSO_3))
8109 /* Now loop through additional data
8110 * fragments, and queue them.
8112 last = skb_shinfo(skb)->nr_frags - 1;
8113 for (i = 0; i <= last; i++) {
8114 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8116 len = skb_frag_size(frag);
8117 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8118 len, DMA_TO_DEVICE);
8120 tnapi->tx_buffers[entry].skb = NULL;
8121 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8123 if (dma_mapping_error(&tp->pdev->dev, mapping))
8127 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8129 ((i == last) ? TXD_FLAG_END : 0),
8131 would_hit_hwbug = 1;
8137 if (would_hit_hwbug) {
8138 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8140 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8141 /* If it's a TSO packet, do GSO instead of
8142 * allocating and copying to a large linear SKB
8145 iph->check = ip_csum;
8146 iph->tot_len = ip_tot_len;
8148 tcph->check = tcp_csum;
8149 return tg3_tso_bug(tp, tnapi, txq, skb);
8152 /* If the workaround fails due to memory/mapping
8153 * failure, silently drop this packet.
8155 entry = tnapi->tx_prod;
8156 budget = tg3_tx_avail(tnapi);
8157 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8158 base_flags, mss, vlan))
8162 skb_tx_timestamp(skb);
8163 netdev_tx_sent_queue(txq, skb->len);
8165 /* Sync BD data before updating mailbox */
8168 tnapi->tx_prod = entry;
8169 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8170 netif_tx_stop_queue(txq);
8172 /* netif_tx_stop_queue() must be done before checking
8173 * tx index in tg3_tx_avail() below, because in
8174 * tg3_tx(), we update tx index before checking for
8175 * netif_tx_queue_stopped().
8178 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8179 netif_tx_wake_queue(txq);
8182 return NETDEV_TX_OK;
8185 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8186 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8188 dev_kfree_skb_any(skb);
8190 tnapi->tx_dropped++;
8191 return NETDEV_TX_OK;
8194 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8196 struct netdev_queue *txq;
8197 u16 skb_queue_mapping;
8200 skb_queue_mapping = skb_get_queue_mapping(skb);
8201 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8203 ret = __tg3_start_xmit(skb, dev);
8205 /* Notify the hardware that packets are ready by updating the TX ring
8206 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8207 * the hardware for every packet. To guarantee forward progress the TX
8208 * ring must be drained when it is full as indicated by
8209 * netif_xmit_stopped(). This needs to happen even when the current
8210 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8211 * queued by previous __tg3_start_xmit() calls might get stuck in
8212 * the queue forever.
8214 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8215 struct tg3_napi *tnapi;
8218 tp = netdev_priv(dev);
8219 tnapi = &tp->napi[skb_queue_mapping];
8221 if (tg3_flag(tp, ENABLE_TSS))
8224 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8230 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8233 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8234 MAC_MODE_PORT_MODE_MASK);
8236 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8238 if (!tg3_flag(tp, 5705_PLUS))
8239 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8241 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8242 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8244 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8246 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8248 if (tg3_flag(tp, 5705_PLUS) ||
8249 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8250 tg3_asic_rev(tp) == ASIC_REV_5700)
8251 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8254 tw32(MAC_MODE, tp->mac_mode);
8258 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8260 u32 val, bmcr, mac_mode, ptest = 0;
8262 tg3_phy_toggle_apd(tp, false);
8263 tg3_phy_toggle_automdix(tp, false);
8265 if (extlpbk && tg3_phy_set_extloopbk(tp))
8268 bmcr = BMCR_FULLDPLX;
8273 bmcr |= BMCR_SPEED100;
8277 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8279 bmcr |= BMCR_SPEED100;
8282 bmcr |= BMCR_SPEED1000;
8287 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8288 tg3_readphy(tp, MII_CTRL1000, &val);
8289 val |= CTL1000_AS_MASTER |
8290 CTL1000_ENABLE_MASTER;
8291 tg3_writephy(tp, MII_CTRL1000, val);
8293 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8294 MII_TG3_FET_PTEST_TRIM_2;
8295 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8298 bmcr |= BMCR_LOOPBACK;
8300 tg3_writephy(tp, MII_BMCR, bmcr);
8302 /* The write needs to be flushed for the FETs */
8303 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8304 tg3_readphy(tp, MII_BMCR, &bmcr);
8308 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8309 tg3_asic_rev(tp) == ASIC_REV_5785) {
8310 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8311 MII_TG3_FET_PTEST_FRC_TX_LINK |
8312 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8314 /* The write needs to be flushed for the AC131 */
8315 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8318 /* Reset to prevent losing 1st rx packet intermittently */
8319 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8320 tg3_flag(tp, 5780_CLASS)) {
8321 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8323 tw32_f(MAC_RX_MODE, tp->rx_mode);
8326 mac_mode = tp->mac_mode &
8327 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8328 if (speed == SPEED_1000)
8329 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8331 mac_mode |= MAC_MODE_PORT_MODE_MII;
8333 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8334 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8336 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8337 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8338 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8339 mac_mode |= MAC_MODE_LINK_POLARITY;
8341 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8342 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8345 tw32(MAC_MODE, mac_mode);
8351 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8353 struct tg3 *tp = netdev_priv(dev);
8355 if (features & NETIF_F_LOOPBACK) {
8356 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8359 spin_lock_bh(&tp->lock);
8360 tg3_mac_loopback(tp, true);
8361 netif_carrier_on(tp->dev);
8362 spin_unlock_bh(&tp->lock);
8363 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8365 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8368 spin_lock_bh(&tp->lock);
8369 tg3_mac_loopback(tp, false);
8370 /* Force link status check */
8371 tg3_setup_phy(tp, true);
8372 spin_unlock_bh(&tp->lock);
8373 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8377 static netdev_features_t tg3_fix_features(struct net_device *dev,
8378 netdev_features_t features)
8380 struct tg3 *tp = netdev_priv(dev);
8382 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8383 features &= ~NETIF_F_ALL_TSO;
8388 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8390 netdev_features_t changed = dev->features ^ features;
8392 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8393 tg3_set_loopback(dev, features);
8398 static void tg3_rx_prodring_free(struct tg3 *tp,
8399 struct tg3_rx_prodring_set *tpr)
8403 if (tpr != &tp->napi[0].prodring) {
8404 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8405 i = (i + 1) & tp->rx_std_ring_mask)
8406 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8409 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8410 for (i = tpr->rx_jmb_cons_idx;
8411 i != tpr->rx_jmb_prod_idx;
8412 i = (i + 1) & tp->rx_jmb_ring_mask) {
8413 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8421 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8422 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8425 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8426 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8427 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8432 /* Initialize rx rings for packet processing.
8434 * The chip has been shut down and the driver detached from
8435 * the networking, so no interrupts or new tx packets will
8436 * end up in the driver. tp->{tx,}lock are held and thus
8439 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8440 struct tg3_rx_prodring_set *tpr)
8442 u32 i, rx_pkt_dma_sz;
8444 tpr->rx_std_cons_idx = 0;
8445 tpr->rx_std_prod_idx = 0;
8446 tpr->rx_jmb_cons_idx = 0;
8447 tpr->rx_jmb_prod_idx = 0;
8449 if (tpr != &tp->napi[0].prodring) {
8450 memset(&tpr->rx_std_buffers[0], 0,
8451 TG3_RX_STD_BUFF_RING_SIZE(tp));
8452 if (tpr->rx_jmb_buffers)
8453 memset(&tpr->rx_jmb_buffers[0], 0,
8454 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8458 /* Zero out all descriptors. */
8459 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8461 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8462 if (tg3_flag(tp, 5780_CLASS) &&
8463 tp->dev->mtu > ETH_DATA_LEN)
8464 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8465 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8467 /* Initialize invariants of the rings, we only set this
8468 * stuff once. This works because the card does not
8469 * write into the rx buffer posting rings.
8471 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8472 struct tg3_rx_buffer_desc *rxd;
8474 rxd = &tpr->rx_std[i];
8475 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8476 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8477 rxd->opaque = (RXD_OPAQUE_RING_STD |
8478 (i << RXD_OPAQUE_INDEX_SHIFT));
8481 /* Now allocate fresh SKBs for each rx ring. */
8482 for (i = 0; i < tp->rx_pending; i++) {
8483 unsigned int frag_size;
8485 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8487 netdev_warn(tp->dev,
8488 "Using a smaller RX standard ring. Only "
8489 "%d out of %d buffers were allocated "
8490 "successfully\n", i, tp->rx_pending);
8498 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8501 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8503 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8506 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8507 struct tg3_rx_buffer_desc *rxd;
8509 rxd = &tpr->rx_jmb[i].std;
8510 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8511 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8513 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8514 (i << RXD_OPAQUE_INDEX_SHIFT));
8517 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8518 unsigned int frag_size;
8520 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8522 netdev_warn(tp->dev,
8523 "Using a smaller RX jumbo ring. Only %d "
8524 "out of %d buffers were allocated "
8525 "successfully\n", i, tp->rx_jumbo_pending);
8528 tp->rx_jumbo_pending = i;
8537 tg3_rx_prodring_free(tp, tpr);
8541 static void tg3_rx_prodring_fini(struct tg3 *tp,
8542 struct tg3_rx_prodring_set *tpr)
8544 kfree(tpr->rx_std_buffers);
8545 tpr->rx_std_buffers = NULL;
8546 kfree(tpr->rx_jmb_buffers);
8547 tpr->rx_jmb_buffers = NULL;
8549 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8550 tpr->rx_std, tpr->rx_std_mapping);
8554 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8555 tpr->rx_jmb, tpr->rx_jmb_mapping);
8560 static int tg3_rx_prodring_init(struct tg3 *tp,
8561 struct tg3_rx_prodring_set *tpr)
8563 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8565 if (!tpr->rx_std_buffers)
8568 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8569 TG3_RX_STD_RING_BYTES(tp),
8570 &tpr->rx_std_mapping,
8575 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8576 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8578 if (!tpr->rx_jmb_buffers)
8581 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8582 TG3_RX_JMB_RING_BYTES(tp),
8583 &tpr->rx_jmb_mapping,
8592 tg3_rx_prodring_fini(tp, tpr);
8596 /* Free up pending packets in all rx/tx rings.
8598 * The chip has been shut down and the driver detached from
8599 * the networking, so no interrupts or new tx packets will
8600 * end up in the driver. tp->{tx,}lock is not held and we are not
8601 * in an interrupt context and thus may sleep.
8603 static void tg3_free_rings(struct tg3 *tp)
8607 for (j = 0; j < tp->irq_cnt; j++) {
8608 struct tg3_napi *tnapi = &tp->napi[j];
8610 tg3_rx_prodring_free(tp, &tnapi->prodring);
8612 if (!tnapi->tx_buffers)
8615 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8616 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8621 tg3_tx_skb_unmap(tnapi, i,
8622 skb_shinfo(skb)->nr_frags - 1);
8624 dev_consume_skb_any(skb);
8626 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8630 /* Initialize tx/rx rings for packet processing.
8632 * The chip has been shut down and the driver detached from
8633 * the networking, so no interrupts or new tx packets will
8634 * end up in the driver. tp->{tx,}lock are held and thus
8637 static int tg3_init_rings(struct tg3 *tp)
8641 /* Free up all the SKBs. */
8644 for (i = 0; i < tp->irq_cnt; i++) {
8645 struct tg3_napi *tnapi = &tp->napi[i];
8647 tnapi->last_tag = 0;
8648 tnapi->last_irq_tag = 0;
8649 tnapi->hw_status->status = 0;
8650 tnapi->hw_status->status_tag = 0;
8651 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8656 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8658 tnapi->rx_rcb_ptr = 0;
8660 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8662 if (tnapi->prodring.rx_std &&
8663 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8672 static void tg3_mem_tx_release(struct tg3 *tp)
8676 for (i = 0; i < tp->irq_max; i++) {
8677 struct tg3_napi *tnapi = &tp->napi[i];
8679 if (tnapi->tx_ring) {
8680 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8681 tnapi->tx_ring, tnapi->tx_desc_mapping);
8682 tnapi->tx_ring = NULL;
8685 kfree(tnapi->tx_buffers);
8686 tnapi->tx_buffers = NULL;
8690 static int tg3_mem_tx_acquire(struct tg3 *tp)
8693 struct tg3_napi *tnapi = &tp->napi[0];
8695 /* If multivector TSS is enabled, vector 0 does not handle
8696 * tx interrupts. Don't allocate any resources for it.
8698 if (tg3_flag(tp, ENABLE_TSS))
8701 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8702 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8703 sizeof(struct tg3_tx_ring_info),
8705 if (!tnapi->tx_buffers)
8708 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8710 &tnapi->tx_desc_mapping,
8712 if (!tnapi->tx_ring)
8719 tg3_mem_tx_release(tp);
8723 static void tg3_mem_rx_release(struct tg3 *tp)
8727 for (i = 0; i < tp->irq_max; i++) {
8728 struct tg3_napi *tnapi = &tp->napi[i];
8730 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8735 dma_free_coherent(&tp->pdev->dev,
8736 TG3_RX_RCB_RING_BYTES(tp),
8738 tnapi->rx_rcb_mapping);
8739 tnapi->rx_rcb = NULL;
8743 static int tg3_mem_rx_acquire(struct tg3 *tp)
8745 unsigned int i, limit;
8747 limit = tp->rxq_cnt;
8749 /* If RSS is enabled, we need a (dummy) producer ring
8750 * set on vector zero. This is the true hw prodring.
8752 if (tg3_flag(tp, ENABLE_RSS))
8755 for (i = 0; i < limit; i++) {
8756 struct tg3_napi *tnapi = &tp->napi[i];
8758 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8761 /* If multivector RSS is enabled, vector 0
8762 * does not handle rx or tx interrupts.
8763 * Don't allocate any resources for it.
8765 if (!i && tg3_flag(tp, ENABLE_RSS))
8768 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8769 TG3_RX_RCB_RING_BYTES(tp),
8770 &tnapi->rx_rcb_mapping,
8779 tg3_mem_rx_release(tp);
8784 * Must not be invoked with interrupt sources disabled and
8785 * the hardware shutdown down.
8787 static void tg3_free_consistent(struct tg3 *tp)
8791 for (i = 0; i < tp->irq_cnt; i++) {
8792 struct tg3_napi *tnapi = &tp->napi[i];
8794 if (tnapi->hw_status) {
8795 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8797 tnapi->status_mapping);
8798 tnapi->hw_status = NULL;
8802 tg3_mem_rx_release(tp);
8803 tg3_mem_tx_release(tp);
8805 /* tp->hw_stats can be referenced safely:
8806 * 1. under rtnl_lock
8807 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8810 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8811 tp->hw_stats, tp->stats_mapping);
8812 tp->hw_stats = NULL;
8817 * Must not be invoked with interrupt sources disabled and
8818 * the hardware shutdown down. Can sleep.
8820 static int tg3_alloc_consistent(struct tg3 *tp)
8824 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8825 sizeof(struct tg3_hw_stats),
8826 &tp->stats_mapping, GFP_KERNEL);
8830 for (i = 0; i < tp->irq_cnt; i++) {
8831 struct tg3_napi *tnapi = &tp->napi[i];
8832 struct tg3_hw_status *sblk;
8834 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8836 &tnapi->status_mapping,
8838 if (!tnapi->hw_status)
8841 sblk = tnapi->hw_status;
8843 if (tg3_flag(tp, ENABLE_RSS)) {
8844 u16 *prodptr = NULL;
8847 * When RSS is enabled, the status block format changes
8848 * slightly. The "rx_jumbo_consumer", "reserved",
8849 * and "rx_mini_consumer" members get mapped to the
8850 * other three rx return ring producer indexes.
8854 prodptr = &sblk->idx[0].rx_producer;
8857 prodptr = &sblk->rx_jumbo_consumer;
8860 prodptr = &sblk->reserved;
8863 prodptr = &sblk->rx_mini_consumer;
8866 tnapi->rx_rcb_prod_idx = prodptr;
8868 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8872 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8878 tg3_free_consistent(tp);
8882 #define MAX_WAIT_CNT 1000
8884 /* To stop a block, clear the enable bit and poll till it
8885 * clears. tp->lock is held.
8887 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8892 if (tg3_flag(tp, 5705_PLUS)) {
8899 /* We can't enable/disable these bits of the
8900 * 5705/5750, just say success.
8913 for (i = 0; i < MAX_WAIT_CNT; i++) {
8914 if (pci_channel_offline(tp->pdev)) {
8915 dev_err(&tp->pdev->dev,
8916 "tg3_stop_block device offline, "
8917 "ofs=%lx enable_bit=%x\n",
8924 if ((val & enable_bit) == 0)
8928 if (i == MAX_WAIT_CNT && !silent) {
8929 dev_err(&tp->pdev->dev,
8930 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8938 /* tp->lock is held. */
8939 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8943 tg3_disable_ints(tp);
8945 if (pci_channel_offline(tp->pdev)) {
8946 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8947 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8952 tp->rx_mode &= ~RX_MODE_ENABLE;
8953 tw32_f(MAC_RX_MODE, tp->rx_mode);
8956 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8957 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8958 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8959 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8960 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8961 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8963 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8964 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8965 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8966 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8967 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8968 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8969 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8971 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8972 tw32_f(MAC_MODE, tp->mac_mode);
8975 tp->tx_mode &= ~TX_MODE_ENABLE;
8976 tw32_f(MAC_TX_MODE, tp->tx_mode);
8978 for (i = 0; i < MAX_WAIT_CNT; i++) {
8980 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8983 if (i >= MAX_WAIT_CNT) {
8984 dev_err(&tp->pdev->dev,
8985 "%s timed out, TX_MODE_ENABLE will not clear "
8986 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8990 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8991 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8992 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8994 tw32(FTQ_RESET, 0xffffffff);
8995 tw32(FTQ_RESET, 0x00000000);
8997 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8998 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9001 for (i = 0; i < tp->irq_cnt; i++) {
9002 struct tg3_napi *tnapi = &tp->napi[i];
9003 if (tnapi->hw_status)
9004 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9010 /* Save PCI command register before chip reset */
9011 static void tg3_save_pci_state(struct tg3 *tp)
9013 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9016 /* Restore PCI state after chip reset */
9017 static void tg3_restore_pci_state(struct tg3 *tp)
9021 /* Re-enable indirect register accesses. */
9022 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9023 tp->misc_host_ctrl);
9025 /* Set MAX PCI retry to zero. */
9026 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9027 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9028 tg3_flag(tp, PCIX_MODE))
9029 val |= PCISTATE_RETRY_SAME_DMA;
9030 /* Allow reads and writes to the APE register and memory space. */
9031 if (tg3_flag(tp, ENABLE_APE))
9032 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9033 PCISTATE_ALLOW_APE_SHMEM_WR |
9034 PCISTATE_ALLOW_APE_PSPACE_WR;
9035 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9037 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9039 if (!tg3_flag(tp, PCI_EXPRESS)) {
9040 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9041 tp->pci_cacheline_sz);
9042 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9046 /* Make sure PCI-X relaxed ordering bit is clear. */
9047 if (tg3_flag(tp, PCIX_MODE)) {
9050 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9052 pcix_cmd &= ~PCI_X_CMD_ERO;
9053 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9057 if (tg3_flag(tp, 5780_CLASS)) {
9059 /* Chip reset on 5780 will reset MSI enable bit,
9060 * so need to restore it.
9062 if (tg3_flag(tp, USING_MSI)) {
9065 pci_read_config_word(tp->pdev,
9066 tp->msi_cap + PCI_MSI_FLAGS,
9068 pci_write_config_word(tp->pdev,
9069 tp->msi_cap + PCI_MSI_FLAGS,
9070 ctrl | PCI_MSI_FLAGS_ENABLE);
9071 val = tr32(MSGINT_MODE);
9072 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9077 static void tg3_override_clk(struct tg3 *tp)
9081 switch (tg3_asic_rev(tp)) {
9083 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9084 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9085 TG3_CPMU_MAC_ORIDE_ENABLE);
9090 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9098 static void tg3_restore_clk(struct tg3 *tp)
9102 switch (tg3_asic_rev(tp)) {
9104 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9105 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9106 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9111 val = tr32(TG3_CPMU_CLCK_ORIDE);
9112 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9120 /* tp->lock is held. */
9121 static int tg3_chip_reset(struct tg3 *tp)
9122 __releases(tp->lock)
9123 __acquires(tp->lock)
9126 void (*write_op)(struct tg3 *, u32, u32);
9129 if (!pci_device_is_present(tp->pdev))
9134 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9136 /* No matching tg3_nvram_unlock() after this because
9137 * chip reset below will undo the nvram lock.
9139 tp->nvram_lock_cnt = 0;
9141 /* GRC_MISC_CFG core clock reset will clear the memory
9142 * enable bit in PCI register 4 and the MSI enable bit
9143 * on some chips, so we save relevant registers here.
9145 tg3_save_pci_state(tp);
9147 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9148 tg3_flag(tp, 5755_PLUS))
9149 tw32(GRC_FASTBOOT_PC, 0);
9152 * We must avoid the readl() that normally takes place.
9153 * It locks machines, causes machine checks, and other
9154 * fun things. So, temporarily disable the 5701
9155 * hardware workaround, while we do the reset.
9157 write_op = tp->write32;
9158 if (write_op == tg3_write_flush_reg32)
9159 tp->write32 = tg3_write32;
9161 /* Prevent the irq handler from reading or writing PCI registers
9162 * during chip reset when the memory enable bit in the PCI command
9163 * register may be cleared. The chip does not generate interrupt
9164 * at this time, but the irq handler may still be called due to irq
9165 * sharing or irqpoll.
9167 tg3_flag_set(tp, CHIP_RESETTING);
9168 for (i = 0; i < tp->irq_cnt; i++) {
9169 struct tg3_napi *tnapi = &tp->napi[i];
9170 if (tnapi->hw_status) {
9171 tnapi->hw_status->status = 0;
9172 tnapi->hw_status->status_tag = 0;
9174 tnapi->last_tag = 0;
9175 tnapi->last_irq_tag = 0;
9179 tg3_full_unlock(tp);
9181 for (i = 0; i < tp->irq_cnt; i++)
9182 synchronize_irq(tp->napi[i].irq_vec);
9184 tg3_full_lock(tp, 0);
9186 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9187 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9188 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9192 val = GRC_MISC_CFG_CORECLK_RESET;
9194 if (tg3_flag(tp, PCI_EXPRESS)) {
9195 /* Force PCIe 1.0a mode */
9196 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9197 !tg3_flag(tp, 57765_PLUS) &&
9198 tr32(TG3_PCIE_PHY_TSTCTL) ==
9199 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9200 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9202 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9203 tw32(GRC_MISC_CFG, (1 << 29));
9208 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9209 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9210 tw32(GRC_VCPU_EXT_CTRL,
9211 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9214 /* Set the clock to the highest frequency to avoid timeouts. With link
9215 * aware mode, the clock speed could be slow and bootcode does not
9216 * complete within the expected time. Override the clock to allow the
9217 * bootcode to finish sooner and then restore it.
9219 tg3_override_clk(tp);
9221 /* Manage gphy power for all CPMU absent PCIe devices. */
9222 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9223 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9225 tw32(GRC_MISC_CFG, val);
9227 /* restore 5701 hardware bug workaround write method */
9228 tp->write32 = write_op;
9230 /* Unfortunately, we have to delay before the PCI read back.
9231 * Some 575X chips even will not respond to a PCI cfg access
9232 * when the reset command is given to the chip.
9234 * How do these hardware designers expect things to work
9235 * properly if the PCI write is posted for a long period
9236 * of time? It is always necessary to have some method by
9237 * which a register read back can occur to push the write
9238 * out which does the reset.
9240 * For most tg3 variants the trick below was working.
9245 /* Flush PCI posted writes. The normal MMIO registers
9246 * are inaccessible at this time so this is the only
9247 * way to make this reliably (actually, this is no longer
9248 * the case, see above). I tried to use indirect
9249 * register read/write but this upset some 5701 variants.
9251 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9255 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9258 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9262 /* Wait for link training to complete. */
9263 for (j = 0; j < 5000; j++)
9266 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9267 pci_write_config_dword(tp->pdev, 0xc4,
9268 cfg_val | (1 << 15));
9271 /* Clear the "no snoop" and "relaxed ordering" bits. */
9272 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9274 * Older PCIe devices only support the 128 byte
9275 * MPS setting. Enforce the restriction.
9277 if (!tg3_flag(tp, CPMU_PRESENT))
9278 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9279 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9281 /* Clear error status */
9282 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9283 PCI_EXP_DEVSTA_CED |
9284 PCI_EXP_DEVSTA_NFED |
9285 PCI_EXP_DEVSTA_FED |
9286 PCI_EXP_DEVSTA_URD);
9289 tg3_restore_pci_state(tp);
9291 tg3_flag_clear(tp, CHIP_RESETTING);
9292 tg3_flag_clear(tp, ERROR_PROCESSED);
9295 if (tg3_flag(tp, 5780_CLASS))
9296 val = tr32(MEMARB_MODE);
9297 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9299 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9301 tw32(0x5000, 0x400);
9304 if (tg3_flag(tp, IS_SSB_CORE)) {
9306 * BCM4785: In order to avoid repercussions from using
9307 * potentially defective internal ROM, stop the Rx RISC CPU,
9308 * which is not required.
9311 tg3_halt_cpu(tp, RX_CPU_BASE);
9314 err = tg3_poll_fw(tp);
9318 tw32(GRC_MODE, tp->grc_mode);
9320 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9323 tw32(0xc4, val | (1 << 15));
9326 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9327 tg3_asic_rev(tp) == ASIC_REV_5705) {
9328 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9329 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9330 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9331 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9334 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9335 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9337 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9338 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9343 tw32_f(MAC_MODE, val);
9346 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9350 if (tg3_flag(tp, PCI_EXPRESS) &&
9351 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9352 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9353 !tg3_flag(tp, 57765_PLUS)) {
9356 tw32(0x7c00, val | (1 << 25));
9359 tg3_restore_clk(tp);
9361 /* Increase the core clock speed to fix tx timeout issue for 5762
9362 * with 100Mbps link speed.
9364 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9365 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9366 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9367 TG3_CPMU_MAC_ORIDE_ENABLE);
9370 /* Reprobe ASF enable state. */
9371 tg3_flag_clear(tp, ENABLE_ASF);
9372 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9373 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9375 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9376 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9377 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9380 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9381 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9382 tg3_flag_set(tp, ENABLE_ASF);
9383 tp->last_event_jiffies = jiffies;
9384 if (tg3_flag(tp, 5750_PLUS))
9385 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9387 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9388 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9389 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9390 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9391 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9398 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9399 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9400 static void __tg3_set_rx_mode(struct net_device *);
9402 /* tp->lock is held. */
/* Halt the device around a chip reset: signal the firmware, stop the
 * hardware, reset the chip, restore the MAC address, and snapshot the
 * running statistics so they survive the reset.  @silent suppresses
 * warnings from tg3_abort_hw; @kind is the RESET_KIND_* signature code.
 */
9403 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9409 tg3_write_sig_pre_reset(tp, kind);
9411 tg3_abort_hw(tp, silent);
/* err presumably propagates to the caller on an elided line -- verify. */
9412 err = tg3_chip_reset(tp);
9414 __tg3_set_mac_addr(tp, false);
9416 tg3_write_sig_legacy(tp, kind);
9417 tg3_write_sig_post_reset(tp, kind);
9420 /* Save the stats across chip resets... */
9421 tg3_get_nstats(tp, &tp->net_stats_prev);
9422 tg3_get_estats(tp, &tp->estats_prev);
9424 /* And make sure the next sample is new data */
9425 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Reset the per-vector drop counters for every possible IRQ vector. */
9427 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9428 struct tg3_napi *tnapi = &tp->napi[i];
9430 tnapi->rx_dropped = 0;
9431 tnapi->tx_dropped = 0;
/* ndo_set_mac_address handler: validate and install a new MAC address.
 * Returns -EADDRNOTAVAIL for an invalid address.  If the interface is
 * not running only the software copy is updated (hardware programming
 * happens on an elided early-exit path -- the registers are written
 * below only for the running case).
 */
9438 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9440 struct tg3 *tp = netdev_priv(dev);
9441 struct sockaddr *addr = p;
9443 bool skip_mac_1 = false;
9445 if (!is_valid_ether_addr(addr->sa_data))
9446 return -EADDRNOTAVAIL;
9448 eth_hw_addr_set(dev, addr->sa_data);
9450 if (!netif_running(dev))
9453 if (tg3_flag(tp, ENABLE_ASF)) {
9454 u32 addr0_high, addr0_low, addr1_high, addr1_low;
/* Read back the current MAC address registers to detect whether the
 * ASF firmware has claimed MAC address slot 1 for its own use.
 */
9456 addr0_high = tr32(MAC_ADDR_0_HIGH);
9457 addr0_low = tr32(MAC_ADDR_0_LOW);
9458 addr1_high = tr32(MAC_ADDR_1_HIGH);
9459 addr1_low = tr32(MAC_ADDR_1_LOW);
9461 /* Skip MAC addr 1 if ASF is using it. */
9462 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9463 !(addr1_high == 0 && addr1_low == 0))
/* Program the hardware filters under tp->lock so the rx path never
 * sees a half-updated address/rx-mode combination.
 */
9466 spin_lock_bh(&tp->lock);
9467 __tg3_set_mac_addr(tp, skip_mac_1);
9468 __tg3_set_rx_mode(dev);
9469 spin_unlock_bh(&tp->lock);
9474 /* tp->lock is held. */
/* Program one TG3_BDINFO ring control block in NIC SRAM at @bdinfo_addr:
 * the 64-bit host DMA address of the ring (high then low word), the
 * maxlen/flags word, and -- on pre-5705 chips only -- the ring's
 * location in NIC SRAM.  The tg3_write_mem() call heads are on elided
 * lines; only their argument continuations are visible here.
 */
9475 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9476 dma_addr_t mapping, u32 maxlen_flags,
9480 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9481 ((u64) mapping >> 32));
9483 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9484 ((u64) mapping & 0xffffffff));
9486 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* NIC-resident descriptor address exists only on pre-5705 hardware. */
9489 if (!tg3_flag(tp, 5705_PLUS))
9491 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Apply the ethtool TX coalescing parameters to the host coalescing
 * engine.  Without TSS the vector-0 registers carry the real values;
 * otherwise they are zeroed (the 'else' keyword is on an elided line)
 * and each TX queue's per-vector register bank (stride 0x18) is
 * programmed instead.  Unused vector banks are cleared at the end.
 */
9496 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9500 if (!tg3_flag(tp, ENABLE_TSS)) {
9501 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9502 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9503 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9505 tw32(HOSTCC_TXCOL_TICKS, 0);
9506 tw32(HOSTCC_TXMAX_FRAMES, 0);
9507 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
/* Per-vector register banks start at *_VEC1 and repeat every 0x18 bytes. */
9509 for (; i < tp->txq_cnt; i++) {
9512 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9513 tw32(reg, ec->tx_coalesce_usecs);
9514 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9515 tw32(reg, ec->tx_max_coalesced_frames);
9516 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9517 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the banks of any remaining (unused) interrupt vectors. */
9521 for (; i < tp->irq_max - 1; i++) {
9522 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9523 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9524 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* RX counterpart of tg3_coal_tx_init(): program the ethtool RX
 * coalescing parameters.  Vector-0 registers get the real values only
 * when RSS is disabled; with RSS each RX queue's per-vector bank
 * (stride 0x18) is programmed and leftover banks are zeroed.
 */
9528 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9531 u32 limit = tp->rxq_cnt;
9533 if (!tg3_flag(tp, ENABLE_RSS)) {
9534 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9535 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9536 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9539 tw32(HOSTCC_RXCOL_TICKS, 0);
9540 tw32(HOSTCC_RXMAX_FRAMES, 0);
9541 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
/* Per-RX-queue register banks, same 0x18-byte stride as the TX side. */
9544 for (; i < limit; i++) {
9547 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9548 tw32(reg, ec->rx_coalesce_usecs);
9549 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9550 tw32(reg, ec->rx_max_coalesced_frames);
9551 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9552 tw32(reg, ec->rx_max_coalesced_frames_irq);
/* Clear the banks of any unused interrupt vectors. */
9555 for (; i < tp->irq_max - 1; i++) {
9556 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9557 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9558 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Push the full ethtool_coalesce configuration into the chip: TX and RX
 * vector registers first, then the pre-5705 tick-on-interrupt and
 * statistics-block registers (those registers do not exist on 5705+).
 * val may be adjusted on elided lines before the final write -- verify.
 */
9562 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9564 tg3_coal_tx_init(tp, ec);
9565 tg3_coal_rx_init(tp, ec);
9567 if (!tg3_flag(tp, 5705_PLUS)) {
9568 u32 val = ec->stats_block_coalesce_usecs;
9570 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9571 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9576 tw32(HOSTCC_STAT_COAL_TICKS, val);
9580 /* tp->lock is held. */
/* Disable every NIC-SRAM send ring control block except the first.
 * The number of send RCBs present depends on the chip family (16, 4,
 * 2, or 1); each extra RCB is disabled by writing BDINFO_FLAGS_DISABLED
 * into its maxlen/flags word.
 */
9581 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9585 /* Disable all transmit rings but the first. */
9586 if (!tg3_flag(tp, 5705_PLUS))
9587 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9588 else if (tg3_flag(tp, 5717_PLUS))
9589 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9590 else if (tg3_flag(tp, 57765_CLASS) ||
9591 tg3_asic_rev(tp) == ASIC_REV_5762)
9592 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9594 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
/* Walk RCB slots 1..limit and mark each disabled. */
9596 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9597 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9598 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9599 BDINFO_FLAGS_DISABLED);
9602 /* tp->lock is held. */
/* Program a send ring control block for every napi vector that owns a
 * TX ring.  With TSS enabled the starting index is adjusted on an
 * elided line (presumably skipping vector 0) -- verify against the
 * full source.
 */
9603 static void tg3_tx_rcbs_init(struct tg3 *tp)
9606 u32 txrcb = NIC_SRAM_SEND_RCB;
9608 if (tg3_flag(tp, ENABLE_TSS))
9611 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9612 struct tg3_napi *tnapi = &tp->napi[i];
/* Vectors without an allocated TX ring are skipped. */
9614 if (!tnapi->tx_ring)
9617 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9618 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9619 NIC_SRAM_TX_BUFFER_DESC);
9623 /* tp->lock is held. */
/* Disable every NIC-SRAM receive-return ring control block except the
 * first.  The RCB count (17, 16, 4, or 1) depends on the chip family;
 * each extra RCB is disabled via BDINFO_FLAGS_DISABLED.
 */
9624 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9628 /* Disable all receive return rings but the first. */
9629 if (tg3_flag(tp, 5717_PLUS))
9630 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9631 else if (!tg3_flag(tp, 5705_PLUS))
9632 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9633 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9634 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9635 tg3_flag(tp, 57765_CLASS))
9636 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9638 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
/* Walk RCB slots 1..limit and mark each disabled. */
9640 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9641 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9642 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9643 BDINFO_FLAGS_DISABLED);
9646 /* tp->lock is held. */
/* Program a receive-return ring control block for each napi vector.
 * With RSS the starting index is adjusted on an elided line -- verify.
 * The maxlen field is derived from the return-ring mask (ring size =
 * mask + 1); the NIC SRAM address argument is 0.
 */
9647 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9650 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9652 if (tg3_flag(tp, ENABLE_RSS))
9655 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9656 struct tg3_napi *tnapi = &tp->napi[i];
9661 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9662 (tp->rx_ret_ring_mask + 1) <<
9663 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9667 /* tp->lock is held. */
/* Quiesce and re-arm all rings: disable the extra TX and RX-return ring
 * control blocks, mask interrupts via the mailbox, zero every vector's
 * mailboxes and software bookkeeping, clear the status blocks, program
 * their DMA addresses, and finally re-program the active RCBs.
 */
9668 static void tg3_rings_reset(struct tg3 *tp)
9672 struct tg3_napi *tnapi = &tp->napi[0];
9674 tg3_tx_rcbs_disable(tp);
9676 tg3_rx_ret_rcbs_disable(tp);
9678 /* Disable interrupts */
/* Writing 1 to the interrupt mailbox masks further interrupts for the
 * vector until the driver re-enables them.
 */
9679 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9680 tp->napi[0].chk_msi_cnt = 0;
9681 tp->napi[0].last_rx_cons = 0;
9682 tp->napi[0].last_tx_cons = 0;
9684 /* Zero mailbox registers. */
9685 if (tg3_flag(tp, SUPPORT_MSIX)) {
9686 for (i = 1; i < tp->irq_max; i++) {
9687 tp->napi[i].tx_prod = 0;
9688 tp->napi[i].tx_cons = 0;
/* The TX producer mailbox is per-vector only when TSS is active. */
9689 if (tg3_flag(tp, ENABLE_TSS))
9690 tw32_mailbox(tp->napi[i].prodmbox, 0);
9691 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9692 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9693 tp->napi[i].chk_msi_cnt = 0;
9694 tp->napi[i].last_rx_cons = 0;
9695 tp->napi[i].last_tx_cons = 0;
9697 if (!tg3_flag(tp, ENABLE_TSS))
9698 tw32_mailbox(tp->napi[0].prodmbox, 0);
9700 tp->napi[0].tx_prod = 0;
9701 tp->napi[0].tx_cons = 0;
9702 tw32_mailbox(tp->napi[0].prodmbox, 0);
9703 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9706 /* Make sure the NIC-based send BD rings are disabled. */
9707 if (!tg3_flag(tp, 5705_PLUS)) {
9708 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9709 for (i = 0; i < 16; i++)
9710 tw32_tx_mbox(mbox + i * 8, 0);
9713 /* Clear status block in ram. */
9714 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9716 /* Set status block DMA address */
9717 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9718 ((u64) tnapi->status_mapping >> 32));
9719 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9720 ((u64) tnapi->status_mapping & 0xffffffff));
/* Program the additional vectors' status block addresses; stblk is
 * presumably advanced per iteration on an elided line -- verify.
 */
9722 stblk = HOSTCC_STATBLCK_RING1;
9724 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9725 u64 mapping = (u64)tnapi->status_mapping;
9726 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9727 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9730 /* Clear status block in ram. */
9731 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9734 tg3_tx_rcbs_init(tp);
9735 tg3_rx_ret_rcbs_init(tp);
/* Program the RX buffer-descriptor replenish thresholds.  The per-chip
 * BD cache size caps the NIC-side threshold; the host-side threshold is
 * rx_pending / 8 (minimum 1).  The standard ring uses the smaller of
 * the two; 57765+ chips also take a low-watermark value.  The jumbo
 * ring is handled the same way when jumbo frames are supported.
 */
9738 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9740 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9742 if (!tg3_flag(tp, 5750_PLUS) ||
9743 tg3_flag(tp, 5780_CLASS) ||
9744 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9745 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9746 tg3_flag(tp, 57765_PLUS))
9747 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9748 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9749 tg3_asic_rev(tp) == ASIC_REV_5787)
9750 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9752 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9754 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9755 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9757 val = min(nic_rep_thresh, host_rep_thresh);
9758 tw32(RCVBDI_STD_THRESH, val);
9760 if (tg3_flag(tp, 57765_PLUS))
9761 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* Chips without a usable jumbo ring bail out on an elided early return. */
9763 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9766 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9768 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9770 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9771 tw32(RCVBDI_JUMBO_THRESH, val);
9773 if (tg3_flag(tp, 57765_PLUS))
9774 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* Bit-serial CRC-32 over @buf using the little-endian Ethernet
 * polynomial (CRC32_POLY_LE).  Most of the loop body (register init,
 * bit extraction, shift, return) is on elided lines -- the visible
 * structure is the per-byte outer loop and the 8-bit inner loop that
 * conditionally folds in the polynomial.  Used by __tg3_set_rx_mode()
 * to build the multicast hash filter.
 */
9777 static inline u32 calc_crc(unsigned char *buf, int len)
9785 for (j = 0; j < len; j++) {
9788 for (k = 0; k < 8; k++) {
9794 reg ^= CRC32_POLY_LE;
/* Set the four 32-bit multicast hash registers to all-ones (accept
 * every multicast frame) or all-zeros (reject all) depending on
 * @accept_all.
 */
9801 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9803 /* accept or reject all multicast frames */
9804 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9805 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9806 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9807 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute and program the MAC receive mode: promiscuous flag, VLAN
 * tag retention, the multicast hash filter, and the unicast address
 * filter slots.  Caller holds tp->lock (this is called under it from
 * tg3_set_mac_addr above).
 */
9810 static void __tg3_set_rx_mode(struct net_device *dev)
9812 struct tg3 *tp = netdev_priv(dev);
/* Start from the cached mode with PROMISC and KEEP_VLAN_TAG cleared;
 * both are re-derived below.
 */
9815 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9816 RX_MODE_KEEP_VLAN_TAG);
9818 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9819 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9822 if (!tg3_flag(tp, ENABLE_ASF))
9823 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9826 if (dev->flags & IFF_PROMISC) {
9827 /* Promiscuous mode. */
9828 rx_mode |= RX_MODE_PROMISC;
9829 } else if (dev->flags & IFF_ALLMULTI) {
9830 /* Accept all multicast. */
9831 tg3_set_multi(tp, 1);
9832 } else if (netdev_mc_empty(dev)) {
9833 /* Reject all multicast. */
9834 tg3_set_multi(tp, 0);
9836 /* Accept one or more multicast(s). */
9837 struct netdev_hw_addr *ha;
9838 u32 mc_filter[4] = { 0, };
/* Hash each multicast address via CRC-32; 'bit' is derived from the
 * CRC on an elided line, then bits 5-6 select one of the four filter
 * registers.
 */
9843 netdev_for_each_mc_addr(ha, dev) {
9844 crc = calc_crc(ha->addr, ETH_ALEN);
9846 regidx = (bit & 0x60) >> 5;
9848 mc_filter[regidx] |= (1 << bit);
9851 tw32(MAC_HASH_REG_0, mc_filter[0]);
9852 tw32(MAC_HASH_REG_1, mc_filter[1]);
9853 tw32(MAC_HASH_REG_2, mc_filter[2]);
9854 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* More secondary unicast addresses than hardware filter slots forces
 * promiscuous mode; otherwise load each into a MAC address slot.
 */
9857 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9858 rx_mode |= RX_MODE_PROMISC;
9859 } else if (!(dev->flags & IFF_PROMISC)) {
9860 /* Add all entries into to the mac addr filter list */
9862 struct netdev_hw_addr *ha;
9864 netdev_for_each_uc_addr(ha, dev) {
9865 __tg3_set_one_mac_addr(tp, ha->addr,
9866 i + TG3_UCAST_ADDR_IDX(tp));
/* Only touch the register when the mode actually changed. */
9871 if (rx_mode != tp->rx_mode) {
9872 tp->rx_mode = rx_mode;
9873 tw32_f(MAC_RX_MODE, rx_mode);
/* Fill the RSS indirection table with the ethtool default spread over
 * @qcnt RX queues.
 */
9878 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9882 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9883 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
/* Sanity-check the RSS indirection table against the current RX queue
 * count.  With a single queue the table is simply zeroed; otherwise any
 * entry referencing a queue >= rxq_cnt invalidates the whole table and
 * it is rebuilt with defaults.  No-op without MSI-X support.
 */
9886 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9890 if (!tg3_flag(tp, SUPPORT_MSIX))
9893 if (tp->rxq_cnt == 1) {
9894 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9898 /* Validate table against current IRQ count */
9899 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9900 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
/* Loop exited early => an out-of-range entry was found. */
9904 if (i != TG3_RSS_INDIR_TBL_SIZE)
9905 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
/* Write the software RSS indirection table into the MAC's indirection
 * registers, packing multiple table entries (groups of 8, per the
 * i % 8 inner loop) into each 32-bit register.  The shift and register
 * write are on elided lines -- verify against the full source.
 */
9908 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9911 u32 reg = MAC_RSS_INDIR_TBL_0;
9913 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9914 u32 val = tp->rss_ind_tbl[i];
9916 for (; i % 8; i++) {
9918 val |= tp->rss_ind_tbl[i];
/* Select the chip-specific LSO read-DMA TX-length workaround bit:
 * the 5719 variant for ASIC_REV_5719, the 5720 variant otherwise.
 */
9925 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9927 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9928 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9930 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9933 /* tp->lock is held. */
9934 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9936 u32 val, rdmac_mode;
9938 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9940 tg3_disable_ints(tp);
9944 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9946 if (tg3_flag(tp, INIT_COMPLETE))
9947 tg3_abort_hw(tp, 1);
9949 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9950 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9951 tg3_phy_pull_config(tp);
9952 tg3_eee_pull_config(tp, NULL);
9953 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9956 /* Enable MAC control of LPI */
9957 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9963 err = tg3_chip_reset(tp);
9967 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9969 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9970 val = tr32(TG3_CPMU_CTRL);
9971 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9972 tw32(TG3_CPMU_CTRL, val);
9974 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9975 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9976 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9977 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9979 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9980 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9981 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9982 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9984 val = tr32(TG3_CPMU_HST_ACC);
9985 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9986 val |= CPMU_HST_ACC_MACCLK_6_25;
9987 tw32(TG3_CPMU_HST_ACC, val);
9990 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9991 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9992 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9993 PCIE_PWR_MGMT_L1_THRESH_4MS;
9994 tw32(PCIE_PWR_MGMT_THRESH, val);
9996 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9997 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9999 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10001 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10002 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10005 if (tg3_flag(tp, L1PLLPD_EN)) {
10006 u32 grc_mode = tr32(GRC_MODE);
10008 /* Access the lower 1K of PL PCIE block registers. */
10009 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10010 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10012 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10013 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10014 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10016 tw32(GRC_MODE, grc_mode);
10019 if (tg3_flag(tp, 57765_CLASS)) {
10020 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10021 u32 grc_mode = tr32(GRC_MODE);
10023 /* Access the lower 1K of PL PCIE block registers. */
10024 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10025 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10027 val = tr32(TG3_PCIE_TLDLPL_PORT +
10028 TG3_PCIE_PL_LO_PHYCTL5);
10029 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10030 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10032 tw32(GRC_MODE, grc_mode);
10035 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10038 /* Fix transmit hangs */
10039 val = tr32(TG3_CPMU_PADRNG_CTL);
10040 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10041 tw32(TG3_CPMU_PADRNG_CTL, val);
10043 grc_mode = tr32(GRC_MODE);
10045 /* Access the lower 1K of DL PCIE block registers. */
10046 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10047 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10049 val = tr32(TG3_PCIE_TLDLPL_PORT +
10050 TG3_PCIE_DL_LO_FTSMAX);
10051 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10052 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10053 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10055 tw32(GRC_MODE, grc_mode);
10058 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10059 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10060 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10061 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10064 /* This works around an issue with Athlon chipsets on
10065 * B3 tigon3 silicon. This bit has no effect on any
10066 * other revision. But do not set this on PCI Express
10067 * chips and don't even touch the clocks if the CPMU is present.
10069 if (!tg3_flag(tp, CPMU_PRESENT)) {
10070 if (!tg3_flag(tp, PCI_EXPRESS))
10071 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10072 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10075 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10076 tg3_flag(tp, PCIX_MODE)) {
10077 val = tr32(TG3PCI_PCISTATE);
10078 val |= PCISTATE_RETRY_SAME_DMA;
10079 tw32(TG3PCI_PCISTATE, val);
10082 if (tg3_flag(tp, ENABLE_APE)) {
10083 /* Allow reads and writes to the
10084 * APE register and memory space.
10086 val = tr32(TG3PCI_PCISTATE);
10087 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10088 PCISTATE_ALLOW_APE_SHMEM_WR |
10089 PCISTATE_ALLOW_APE_PSPACE_WR;
10090 tw32(TG3PCI_PCISTATE, val);
10093 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10094 /* Enable some hw fixes. */
10095 val = tr32(TG3PCI_MSI_DATA);
10096 val |= (1 << 26) | (1 << 28) | (1 << 29);
10097 tw32(TG3PCI_MSI_DATA, val);
10100 /* Descriptor ring init may make accesses to the
10101 * NIC SRAM area to setup the TX descriptors, so we
10102 * can only do this after the hardware has been
10103 * successfully reset.
10105 err = tg3_init_rings(tp);
10109 if (tg3_flag(tp, 57765_PLUS)) {
10110 val = tr32(TG3PCI_DMA_RW_CTRL) &
10111 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10112 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10113 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10114 if (!tg3_flag(tp, 57765_CLASS) &&
10115 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10116 tg3_asic_rev(tp) != ASIC_REV_5762)
10117 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10118 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10119 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10120 tg3_asic_rev(tp) != ASIC_REV_5761) {
10121 /* This value is determined during the probe time DMA
10122 * engine test, tg3_test_dma.
10124 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10127 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10128 GRC_MODE_4X_NIC_SEND_RINGS |
10129 GRC_MODE_NO_TX_PHDR_CSUM |
10130 GRC_MODE_NO_RX_PHDR_CSUM);
10131 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10133 /* Pseudo-header checksum is done by hardware logic and not
10134 * the offload processers, so make the chip do the pseudo-
10135 * header checksums on receive. For transmit it is more
10136 * convenient to do the pseudo-header checksum in software
10137 * as Linux does that on transmit for us in all cases.
10139 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10141 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10143 tw32(TG3_RX_PTP_CTL,
10144 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10146 if (tg3_flag(tp, PTP_CAPABLE))
10147 val |= GRC_MODE_TIME_SYNC_ENABLE;
10149 tw32(GRC_MODE, tp->grc_mode | val);
10151 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10152 * south bridge limitation. As a workaround, Driver is setting MRRS
10153 * to 2048 instead of default 4096.
10155 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10156 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10157 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10158 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10161 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10162 val = tr32(GRC_MISC_CFG);
10164 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10165 tw32(GRC_MISC_CFG, val);
10167 /* Initialize MBUF/DESC pool. */
10168 if (tg3_flag(tp, 5750_PLUS)) {
10170 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10171 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10172 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10173 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10175 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10176 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10177 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10178 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10181 fw_len = tp->fw_len;
10182 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10183 tw32(BUFMGR_MB_POOL_ADDR,
10184 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10185 tw32(BUFMGR_MB_POOL_SIZE,
10186 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10189 if (tp->dev->mtu <= ETH_DATA_LEN) {
10190 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10191 tp->bufmgr_config.mbuf_read_dma_low_water);
10192 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10193 tp->bufmgr_config.mbuf_mac_rx_low_water);
10194 tw32(BUFMGR_MB_HIGH_WATER,
10195 tp->bufmgr_config.mbuf_high_water);
10197 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10198 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10199 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10200 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10201 tw32(BUFMGR_MB_HIGH_WATER,
10202 tp->bufmgr_config.mbuf_high_water_jumbo);
10204 tw32(BUFMGR_DMA_LOW_WATER,
10205 tp->bufmgr_config.dma_low_water);
10206 tw32(BUFMGR_DMA_HIGH_WATER,
10207 tp->bufmgr_config.dma_high_water);
10209 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10210 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10211 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10212 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10213 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10214 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10215 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10216 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10217 tw32(BUFMGR_MODE, val);
10218 for (i = 0; i < 2000; i++) {
10219 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10224 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10228 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10229 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10231 tg3_setup_rxbd_thresholds(tp);
10233 /* Initialize TG3_BDINFO's at:
10234 * RCVDBDI_STD_BD: standard eth size rx ring
10235 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10236 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10239 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10240 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10241 * ring attribute flags
10242 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10244 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10245 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10247 * The size of each ring is fixed in the firmware, but the location is
10250 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10251 ((u64) tpr->rx_std_mapping >> 32));
10252 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10253 ((u64) tpr->rx_std_mapping & 0xffffffff));
10254 if (!tg3_flag(tp, 5717_PLUS))
10255 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10256 NIC_SRAM_RX_BUFFER_DESC);
10258 /* Disable the mini ring */
10259 if (!tg3_flag(tp, 5705_PLUS))
10260 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10261 BDINFO_FLAGS_DISABLED);
10263 /* Program the jumbo buffer descriptor ring control
10264 * blocks on those devices that have them.
10266 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10267 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10269 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10270 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10271 ((u64) tpr->rx_jmb_mapping >> 32));
10272 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10273 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10274 val = TG3_RX_JMB_RING_SIZE(tp) <<
10275 BDINFO_FLAGS_MAXLEN_SHIFT;
10276 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10277 val | BDINFO_FLAGS_USE_EXT_RECV);
10278 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10279 tg3_flag(tp, 57765_CLASS) ||
10280 tg3_asic_rev(tp) == ASIC_REV_5762)
10281 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10282 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10284 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10285 BDINFO_FLAGS_DISABLED);
10288 if (tg3_flag(tp, 57765_PLUS)) {
10289 val = TG3_RX_STD_RING_SIZE(tp);
10290 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10291 val |= (TG3_RX_STD_DMA_SZ << 2);
10293 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10295 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10297 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10299 tpr->rx_std_prod_idx = tp->rx_pending;
10300 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10302 tpr->rx_jmb_prod_idx =
10303 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10304 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10306 tg3_rings_reset(tp);
10308 /* Initialize MAC address and backoff seed. */
10309 __tg3_set_mac_addr(tp, false);
10311 /* MTU + ethernet header + FCS + optional VLAN tag */
10312 tw32(MAC_RX_MTU_SIZE,
10313 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10315 /* The slot time is changed by tg3_setup_phy if we
10316 * run at gigabit with half duplex.
10318 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10319 (6 << TX_LENGTHS_IPG_SHIFT) |
10320 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10322 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10323 tg3_asic_rev(tp) == ASIC_REV_5762)
10324 val |= tr32(MAC_TX_LENGTHS) &
10325 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10326 TX_LENGTHS_CNT_DWN_VAL_MSK);
10328 tw32(MAC_TX_LENGTHS, val);
10330 /* Receive rules. */
10331 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10332 tw32(RCVLPC_CONFIG, 0x0181);
10334 /* Calculate RDMAC_MODE setting early, we need it to determine
10335 * the RCVLPC_STATE_ENABLE mask.
10337 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10338 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10339 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10340 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10341 RDMAC_MODE_LNGREAD_ENAB);
10343 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10344 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10346 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10347 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10348 tg3_asic_rev(tp) == ASIC_REV_57780)
10349 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10350 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10351 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10353 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10354 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10355 if (tg3_flag(tp, TSO_CAPABLE)) {
10356 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10357 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10358 !tg3_flag(tp, IS_5788)) {
10359 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10363 if (tg3_flag(tp, PCI_EXPRESS))
10364 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10366 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10368 if (tp->dev->mtu <= ETH_DATA_LEN) {
10369 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10370 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10374 if (tg3_flag(tp, HW_TSO_1) ||
10375 tg3_flag(tp, HW_TSO_2) ||
10376 tg3_flag(tp, HW_TSO_3))
10377 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10379 if (tg3_flag(tp, 57765_PLUS) ||
10380 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10381 tg3_asic_rev(tp) == ASIC_REV_57780)
10382 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10384 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10385 tg3_asic_rev(tp) == ASIC_REV_5762)
10386 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10388 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10389 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10390 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10391 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10392 tg3_flag(tp, 57765_PLUS)) {
10395 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10396 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10398 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10400 val = tr32(tgtreg);
10401 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10402 tg3_asic_rev(tp) == ASIC_REV_5762) {
10403 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10404 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10405 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10406 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10407 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10408 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10410 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10413 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10414 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10415 tg3_asic_rev(tp) == ASIC_REV_5762) {
10418 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10419 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10421 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10423 val = tr32(tgtreg);
10425 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10426 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10429 /* Receive/send statistics. */
10430 if (tg3_flag(tp, 5750_PLUS)) {
10431 val = tr32(RCVLPC_STATS_ENABLE);
10432 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10433 tw32(RCVLPC_STATS_ENABLE, val);
10434 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10435 tg3_flag(tp, TSO_CAPABLE)) {
10436 val = tr32(RCVLPC_STATS_ENABLE);
10437 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10438 tw32(RCVLPC_STATS_ENABLE, val);
10440 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10442 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10443 tw32(SNDDATAI_STATSENAB, 0xffffff);
10444 tw32(SNDDATAI_STATSCTRL,
10445 (SNDDATAI_SCTRL_ENABLE |
10446 SNDDATAI_SCTRL_FASTUPD));
10448 /* Setup host coalescing engine. */
10449 tw32(HOSTCC_MODE, 0);
10450 for (i = 0; i < 2000; i++) {
10451 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10456 __tg3_set_coalesce(tp, &tp->coal);
10458 if (!tg3_flag(tp, 5705_PLUS)) {
10459 /* Status/statistics block address. See tg3_timer,
10460 * the tg3_periodic_fetch_stats call there, and
10461 * tg3_get_stats to see how this works for 5705/5750 chips.
10463 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10464 ((u64) tp->stats_mapping >> 32));
10465 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10466 ((u64) tp->stats_mapping & 0xffffffff));
10467 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10469 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10471 /* Clear statistics and status block memory areas */
10472 for (i = NIC_SRAM_STATS_BLK;
10473 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10474 i += sizeof(u32)) {
10475 tg3_write_mem(tp, i, 0);
10480 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10482 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10483 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10484 if (!tg3_flag(tp, 5705_PLUS))
10485 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10487 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10488 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10489 /* reset to prevent losing 1st rx packet intermittently */
10490 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10494 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10495 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10496 MAC_MODE_FHDE_ENABLE;
10497 if (tg3_flag(tp, ENABLE_APE))
10498 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10499 if (!tg3_flag(tp, 5705_PLUS) &&
10500 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10501 tg3_asic_rev(tp) != ASIC_REV_5700)
10502 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10503 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10506 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10507 * If TG3_FLAG_IS_NIC is zero, we should read the
10508 * register to preserve the GPIO settings for LOMs. The GPIOs,
10509 * whether used as inputs or outputs, are set by boot code after
10512 if (!tg3_flag(tp, IS_NIC)) {
10515 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10516 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10517 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10519 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10520 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10521 GRC_LCLCTRL_GPIO_OUTPUT3;
10523 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10524 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10526 tp->grc_local_ctrl &= ~gpio_mask;
10527 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10529 /* GPIO1 must be driven high for eeprom write protect */
10530 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10531 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10532 GRC_LCLCTRL_GPIO_OUTPUT1);
10534 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10537 if (tg3_flag(tp, USING_MSIX)) {
10538 val = tr32(MSGINT_MODE);
10539 val |= MSGINT_MODE_ENABLE;
10540 if (tp->irq_cnt > 1)
10541 val |= MSGINT_MODE_MULTIVEC_EN;
10542 if (!tg3_flag(tp, 1SHOT_MSI))
10543 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10544 tw32(MSGINT_MODE, val);
10547 if (!tg3_flag(tp, 5705_PLUS)) {
10548 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10552 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10553 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10554 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10555 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10556 WDMAC_MODE_LNGREAD_ENAB);
10558 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10559 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10560 if (tg3_flag(tp, TSO_CAPABLE) &&
10561 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10562 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10564 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10565 !tg3_flag(tp, IS_5788)) {
10566 val |= WDMAC_MODE_RX_ACCEL;
10570 /* Enable host coalescing bug fix */
10571 if (tg3_flag(tp, 5755_PLUS))
10572 val |= WDMAC_MODE_STATUS_TAG_FIX;
10574 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10575 val |= WDMAC_MODE_BURST_ALL_DATA;
10577 tw32_f(WDMAC_MODE, val);
10580 if (tg3_flag(tp, PCIX_MODE)) {
10583 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10585 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10586 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10587 pcix_cmd |= PCI_X_CMD_READ_2K;
10588 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10589 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10590 pcix_cmd |= PCI_X_CMD_READ_2K;
10592 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10596 tw32_f(RDMAC_MODE, rdmac_mode);
10599 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10600 tg3_asic_rev(tp) == ASIC_REV_5720) {
10601 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10602 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10605 if (i < TG3_NUM_RDMA_CHANNELS) {
10606 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10607 val |= tg3_lso_rd_dma_workaround_bit(tp);
10608 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10609 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10613 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10614 if (!tg3_flag(tp, 5705_PLUS))
10615 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10617 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10618 tw32(SNDDATAC_MODE,
10619 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10621 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10623 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10624 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10625 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10626 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10627 val |= RCVDBDI_MODE_LRG_RING_SZ;
10628 tw32(RCVDBDI_MODE, val);
10629 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10630 if (tg3_flag(tp, HW_TSO_1) ||
10631 tg3_flag(tp, HW_TSO_2) ||
10632 tg3_flag(tp, HW_TSO_3))
10633 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10634 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10635 if (tg3_flag(tp, ENABLE_TSS))
10636 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10637 tw32(SNDBDI_MODE, val);
10638 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10640 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10641 err = tg3_load_5701_a0_firmware_fix(tp);
10646 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10647 /* Ignore any errors for the firmware download. If download
10648 * fails, the device will operate with EEE disabled
10650 tg3_load_57766_firmware(tp);
10653 if (tg3_flag(tp, TSO_CAPABLE)) {
10654 err = tg3_load_tso_firmware(tp);
10659 tp->tx_mode = TX_MODE_ENABLE;
10661 if (tg3_flag(tp, 5755_PLUS) ||
10662 tg3_asic_rev(tp) == ASIC_REV_5906)
10663 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10665 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10666 tg3_asic_rev(tp) == ASIC_REV_5762) {
10667 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10668 tp->tx_mode &= ~val;
10669 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10672 tw32_f(MAC_TX_MODE, tp->tx_mode);
10675 if (tg3_flag(tp, ENABLE_RSS)) {
10678 tg3_rss_write_indir_tbl(tp);
10680 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10682 for (i = 0; i < 10 ; i++)
10683 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10686 tp->rx_mode = RX_MODE_ENABLE;
10687 if (tg3_flag(tp, 5755_PLUS))
10688 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10690 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10691 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10693 if (tg3_flag(tp, ENABLE_RSS))
10694 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10695 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10696 RX_MODE_RSS_IPV6_HASH_EN |
10697 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10698 RX_MODE_RSS_IPV4_HASH_EN |
10699 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10701 tw32_f(MAC_RX_MODE, tp->rx_mode);
10704 tw32(MAC_LED_CTRL, tp->led_ctrl);
10706 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10707 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10708 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10711 tw32_f(MAC_RX_MODE, tp->rx_mode);
10714 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10715 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10716 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10717 /* Set drive transmission level to 1.2V */
10718 /* only if the signal pre-emphasis bit is not set */
10719 val = tr32(MAC_SERDES_CFG);
10722 tw32(MAC_SERDES_CFG, val);
10724 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10725 tw32(MAC_SERDES_CFG, 0x616000);
10728 /* Prevent chip from dropping frames when flow control
10731 if (tg3_flag(tp, 57765_CLASS))
10735 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10737 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10738 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10739 /* Use hardware link auto-negotiation */
10740 tg3_flag_set(tp, HW_AUTONEG);
10743 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10744 tg3_asic_rev(tp) == ASIC_REV_5714) {
10747 tmp = tr32(SERDES_RX_CTRL);
10748 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10749 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10750 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10751 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10754 if (!tg3_flag(tp, USE_PHYLIB)) {
10755 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10756 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10758 err = tg3_setup_phy(tp, false);
10762 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10763 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10766 /* Clear CRC stats. */
10767 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10768 tg3_writephy(tp, MII_TG3_TEST1,
10769 tmp | MII_TG3_TEST1_CRC_EN);
10770 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10775 __tg3_set_rx_mode(tp->dev);
10777 /* Initialize receive rules. */
10778 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10779 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10780 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10781 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10783 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10787 if (tg3_flag(tp, ENABLE_ASF))
10791 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10794 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10797 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10800 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10803 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10806 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10809 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10812 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10815 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10818 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10821 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10824 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10827 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10829 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10837 if (tg3_flag(tp, ENABLE_APE))
10838 /* Write our heartbeat update interval to APE. */
10839 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10840 APE_HOST_HEARTBEAT_INT_5SEC);
10842 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10847 /* Called at device open time to get the chip ready for
10848 * packet processing. Invoked with tp->lock held.
/* Enables register access, switches clocks, zeroes the PCI memory-window
 * base, then delegates full initialization to tg3_reset_hw().  Returns
 * tg3_reset_hw()'s result; reset_phy selects whether the PHY is also reset.
 */
10850 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10852 /* Chip may have been just powered on. If so, the boot code may still
10853 * be running initialization. Wait for it to finish to avoid races in
10854 * accessing the hardware.
10856 tg3_enable_register_access(tp);
/* NOTE(review): presumably settles core clock sources before the register
 * writes below — confirm against tg3_switch_clocks(). */
10859 tg3_switch_clocks(tp);
/* Reset the indirect-access memory window to offset 0. */
10861 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10863 return tg3_reset_hw(tp, reset_phy);
10866 #ifdef CONFIG_TIGON3_HWMON
/* Read TG3_SD_NUM_RECS fixed-length OCIR records out of the APE scratchpad
 * into the caller's array.  Records whose signature does not match
 * TG3_OCIR_SIG_MAGIC or which lack the ACTIVE flag are zeroed so callers
 * can treat a cleared record as "not present".
 */
10867 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10869 u32 off, len = TG3_OCIR_LEN;
10872 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10873 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
/* Invalidate malformed or inactive records in place. */
10875 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10876 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10877 memset(ocir, 0, len);
10881 /* sysfs attributes for hwmon */
/* hwmon "show" callback: read one temperature word from the APE scratchpad
 * at the offset stored in the sensor attribute's ->index, under tp->lock.
 * The value is multiplied by 1000 because the hwmon sysfs ABI reports
 * temperatures in millidegrees Celsius.
 */
10882 static ssize_t tg3_show_temp(struct device *dev,
10883 struct device_attribute *devattr, char *buf)
10885 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10886 struct tg3 *tp = dev_get_drvdata(dev);
/* Serialize against other register/scratchpad users. */
10889 spin_lock_bh(&tp->lock);
10890 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10891 sizeof(temperature));
10892 spin_unlock_bh(&tp->lock);
10893 return sprintf(buf, "%u\n", temperature * 1000);
/* Read-only (0444) hwmon sensors; each ->index is the APE scratchpad offset
 * that tg3_show_temp() reads: current temp, critical threshold, and max. */
10897 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10898 TG3_TEMP_SENSOR_OFFSET);
10899 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10900 TG3_TEMP_CAUTION_OFFSET);
10901 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10902 TG3_TEMP_MAX_OFFSET);
/* Attribute list handed to hwmon_device_register_with_groups(). */
10904 static struct attribute *tg3_attrs[] = {
10905 &sensor_dev_attr_temp1_input.dev_attr.attr,
10906 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10907 &sensor_dev_attr_temp1_max.dev_attr.attr,
/* Generates tg3_groups[] from tg3_attrs[]. */
10910 ATTRIBUTE_GROUPS(tg3);
/* Unregister the hwmon device if one was created; clearing the pointer
 * makes the function idempotent. */
10912 static void tg3_hwmon_close(struct tg3 *tp)
10914 if (tp->hwmon_dev) {
10915 hwmon_device_unregister(tp->hwmon_dev);
10916 tp->hwmon_dev = NULL;
/* Scan the APE scratchpad for sensor records and, if any carry data,
 * register a hwmon device exposing the temp1_* attributes.  Registration
 * failure is logged and leaves tp->hwmon_dev NULL (non-fatal). */
10920 static void tg3_hwmon_open(struct tg3 *tp)
10924 struct pci_dev *pdev = tp->pdev;
10925 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10927 tg3_sd_scan_scratchpad(tp, ocirs);
/* Total up header+data lengths of active records; records zeroed by the
 * scan contribute nothing. */
10929 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10930 if (!ocirs[i].src_data_length)
10933 size += ocirs[i].src_hdr_length;
10934 size += ocirs[i].src_data_length;
10940 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10942 if (IS_ERR(tp->hwmon_dev)) {
10943 tp->hwmon_dev = NULL;
10944 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
/* No-op stubs when hwmon support is compiled out. */
10948 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10949 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10950 #endif /* CONFIG_TIGON3_HWMON */
/* Accumulate a 32-bit hardware counter register into a 64-bit (low/high)
 * software counter.  The "low < __val" comparison after the add detects
 * unsigned wraparound and carries into .high. */
10953 #define TG3_STAT_ADD32(PSTAT, REG) \
10954 do { u32 __val = tr32(REG); \
10955 (PSTAT)->low += __val; \
10956 if ((PSTAT)->low < __val) \
10957 (PSTAT)->high += 1; \
/* Periodically (from tg3_timer) fold the chip's clear-on-read 32-bit MAC
 * statistics registers into the 64-bit counters in tp->hw_stats. */
10960 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10962 struct tg3_hw_stats *sp = tp->hw_stats;
/* TX-side MAC statistics. */
10967 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10968 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10969 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10970 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10971 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10972 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10973 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10974 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10975 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10976 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10977 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10978 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10979 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* 5719/5720 RDMA workaround: once enough frames have been transmitted,
 * clear the LSO read-DMA workaround bit set in tg3_reset_hw() and drop
 * the 5719_5720_RDMA_BUG flag so this runs only once. */
10980 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10981 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10982 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10985 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10986 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10987 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10988 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
/* RX-side MAC statistics. */
10991 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10992 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10993 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10994 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10995 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10996 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10997 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10998 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10999 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11000 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11001 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11002 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11003 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11004 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11006 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* Most chips expose a discard counter; the listed ASIC/chip revs instead
 * signal an MBUF low-watermark event via HOSTCC_FLOW_ATTN, which is
 * acknowledged by writing the bit back and counted manually (with the
 * same wraparound carry as TG3_STAT_ADD32). */
11007 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11008 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11009 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11010 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11011 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11013 u32 val = tr32(HOSTCC_FLOW_ATTN);
11014 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11016 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11017 sp->rx_discards.low += val;
11018 if (sp->rx_discards.low < val)
11019 sp->rx_discards.high += 1;
11021 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11023 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Detect a stuck NAPI vector: if a vector has pending work but neither its
 * rx nor tx consumer index moved since the previous timer tick, count one
 * strike via chk_msi_cnt (the recovery action is elided in this excerpt —
 * NOTE(review): confirm against full source).  Progress resets the strike
 * counter and snapshots the current consumer indices. */
11026 static void tg3_chk_missed_msi(struct tg3 *tp)
11030 for (i = 0; i < tp->irq_cnt; i++) {
11031 struct tg3_napi *tnapi = &tp->napi[i];
11033 if (tg3_has_work(tnapi)) {
11034 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11035 tnapi->last_tx_cons == tnapi->tx_cons) {
11036 if (tnapi->chk_msi_cnt < 1) {
11037 tnapi->chk_msi_cnt++;
11043 tnapi->chk_msi_cnt = 0;
11044 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11045 tnapi->last_tx_cons = tnapi->tx_cons;
/* Periodic driver timer (runs every tp->timer_offset jiffies under
 * tp->lock): missed-MSI check, non-tagged-status interrupt race recovery,
 * once-per-second stats fetch / link polling, and the ASF/APE heartbeats.
 * Always re-arms itself at the bottom. */
11049 static void tg3_timer(struct timer_list *t)
11051 struct tg3 *tp = from_timer(tp, t, timer);
11053 spin_lock(&tp->lock);
/* Skip all work while an IRQ sync or reset task is in flight. */
11055 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11056 spin_unlock(&tp->lock);
11057 goto restart_timer;
11060 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11061 tg3_flag(tp, 57765_CLASS))
11062 tg3_chk_missed_msi(tp);
11064 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11065 /* BCM4785: Flush posted writes from GbE to host memory. */
11069 if (!tg3_flag(tp, TAGGED_STATUS)) {
11070 /* All of this garbage is because when using non-tagged
11071 * IRQ status the mailbox/status_block protocol the chip
11072 * uses with the cpu is race prone.
/* Status block says an update is pending: force an interrupt via
 * GRC_LCLCTRL_SETINT; otherwise kick host coalescing to produce one. */
11074 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11075 tw32(GRC_LOCAL_CTRL,
11076 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11078 tw32(HOSTCC_MODE, tp->coalesce_mode |
11079 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write DMA engine unexpectedly disabled → chip hung; schedule a reset. */
11082 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11083 spin_unlock(&tp->lock);
11084 tg3_reset_task_schedule(tp);
11085 goto restart_timer;
11089 /* This part only runs once per second. */
11090 if (!--tp->timer_counter) {
11091 if (tg3_flag(tp, 5705_PLUS))
11092 tg3_periodic_fetch_stats(tp);
/* Deferred EEE enable, counted down in seconds. */
11094 if (tp->setlpicnt && !--tp->setlpicnt)
11095 tg3_phy_eee_enable(tp);
/* Link supervision — exactly one of the following polling styles applies
 * depending on chip flags. */
11097 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11101 mac_stat = tr32(MAC_STATUS);
11104 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11105 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11107 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11111 tg3_setup_phy(tp, false);
11112 } else if (tg3_flag(tp, POLL_SERDES)) {
11113 u32 mac_stat = tr32(MAC_STATUS);
11114 int need_setup = 0;
11117 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11120 if (!tp->link_up &&
11121 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11122 MAC_STATUS_SIGNAL_DET))) {
11126 if (!tp->serdes_counter) {
11129 ~MAC_MODE_PORT_MODE_MASK));
11131 tw32_f(MAC_MODE, tp->mac_mode);
11134 tg3_setup_phy(tp, false);
11136 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11137 tg3_flag(tp, 5780_CLASS)) {
11138 tg3_serdes_parallel_detect(tp);
11139 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11140 u32 cpmu = tr32(TG3_CPMU_STATUS);
11141 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11142 TG3_CPMU_STATUS_LINK_MASK);
11144 if (link_up != tp->link_up)
11145 tg3_setup_phy(tp, false);
11148 tp->timer_counter = tp->timer_multiplier;
11151 /* Heartbeat is only sent once every 2 seconds.
11153 * The heartbeat is to tell the ASF firmware that the host
11154 * driver is still alive. In the event that the OS crashes,
11155 * ASF needs to reset the hardware to free up the FIFO space
11156 * that may be filled with rx packets destined for the host.
11157 * If the FIFO is full, ASF will no longer function properly.
11159 * Unintended resets have been reported on real time kernels
11160 * where the timer doesn't run on time. Netpoll will also have
11163 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11164 * to check the ring condition when the heartbeat is expiring
11165 * before doing the reset. This will prevent most unintended
11168 if (!--tp->asf_counter) {
11169 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11170 tg3_wait_for_event_ack(tp);
11172 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11173 FWCMD_NICDRV_ALIVE3);
11174 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11175 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11176 TG3_FW_UPDATE_TIMEOUT_SEC);
11178 tg3_generate_fw_event(tp);
11180 tp->asf_counter = tp->asf_multiplier;
11183 /* Update the APE heartbeat every 5 seconds.*/
11184 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11186 spin_unlock(&tp->lock);
/* Re-arm; tp->timer_offset was fixed in tg3_timer_init(). */
11189 tp->timer.expires = jiffies + tp->timer_offset;
11190 add_timer(&tp->timer);
/* Choose the timer period (1s for tagged-status chips, 100ms otherwise —
 * excluding 5717 / 57765-class which need the faster missed-MSI check),
 * derive the per-second and ASF heartbeat multipliers, and set up the
 * timer object with tg3_timer as callback. */
11193 static void tg3_timer_init(struct tg3 *tp)
11195 if (tg3_flag(tp, TAGGED_STATUS) &&
11196 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11197 !tg3_flag(tp, 57765_CLASS))
11198 tp->timer_offset = HZ;
11200 tp->timer_offset = HZ / 10;
11202 BUG_ON(tp->timer_offset > HZ);
/* timer_multiplier = ticks per second; asf_multiplier = ticks per
 * firmware heartbeat interval. */
11204 tp->timer_multiplier = (HZ / tp->timer_offset);
11205 tp->asf_multiplier = (HZ / tp->timer_offset) *
11206 TG3_FW_UPDATE_FREQ_SEC;
11208 timer_setup(&tp->timer, tg3_timer, 0);
/* Reload the countdowns computed by tg3_timer_init() and arm the timer. */
11211 static void tg3_timer_start(struct tg3 *tp)
11213 tp->asf_counter = tp->asf_multiplier;
11214 tp->timer_counter = tp->timer_multiplier;
11216 tp->timer.expires = jiffies + tp->timer_offset;
11217 add_timer(&tp->timer);
/* Cancel the periodic timer and wait for a running handler to finish. */
11220 static void tg3_timer_stop(struct tg3 *tp)
11222 del_timer_sync(&tp->timer);
11225 /* Restart hardware after configuration changes, self-test, etc.
11226 * Invoked with tp->lock held.
/* On init failure the device is halted and closed; the sparse annotations
 * document that tp->lock is dropped around dev_close() and re-taken. */
11228 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11229 __releases(tp->lock)
11230 __acquires(tp->lock)
11234 err = tg3_init_hw(tp, reset_phy);
11236 netdev_err(tp->dev,
11237 "Failed to re-initialize device, aborting\n");
11238 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* dev_close() must run without the driver lock held. */
11239 tg3_full_unlock(tp);
11240 tg3_timer_stop(tp);
11242 tg3_napi_enable(tp);
11243 dev_close(tp->dev);
11244 tg3_full_lock(tp, 0);
/* Deferred reset worker: halts and re-initializes the chip after a hang
 * detected elsewhere (e.g. the WDMAC check in tg3_timer).  Bails out early
 * during PCI error recovery or when the netdev is down.  Clears
 * RESET_TASK_PENDING on every exit path so tg3_reset_task_cancel() can't
 * block forever. */
11249 static void tg3_reset_task(struct work_struct *work)
11251 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11255 tg3_full_lock(tp, 0);
11257 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11258 tp->pdev->error_state != pci_channel_io_normal) {
11259 tg3_flag_clear(tp, RESET_TASK_PENDING);
11260 tg3_full_unlock(tp);
11265 tg3_full_unlock(tp);
11269 tg3_netif_stop(tp);
11271 tg3_full_lock(tp, 1);
/* A TX timeout may have been caused by reordered mailbox writes; fall
 * back to flushed mailbox writers before retrying. */
11273 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11274 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11275 tp->write32_rx_mbox = tg3_write_flush_reg32;
11276 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11277 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11281 err = tg3_init_hw(tp, true);
11283 tg3_full_unlock(tp);
11285 tg3_napi_enable(tp);
11286 /* Clear this flag so that tg3_reset_task_cancel() will not
11287 * call cancel_work_sync() and wait forever.
11289 tg3_flag_clear(tp, RESET_TASK_PENDING);
11290 dev_close(tp->dev);
11294 tg3_netif_start(tp);
11295 tg3_full_unlock(tp);
11297 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* Request the IRQ for NAPI vector irq_num.  With a single vector the plain
 * device name is used; with multiple vectors a per-vector label is built in
 * tnapi->irq_lbl indicating which rings (txrx/tx/rx/none) the vector
 * services.  MSI(-X) handlers are non-shared; legacy INTx uses IRQF_SHARED
 * and the tagged or non-tagged handler per chip capability. */
11302 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11305 unsigned long flags;
11307 struct tg3_napi *tnapi = &tp->napi[irq_num];
11309 if (tp->irq_cnt == 1)
11310 name = tp->dev->name;
11312 name = &tnapi->irq_lbl[0];
11313 if (tnapi->tx_buffers && tnapi->rx_rcb)
11314 snprintf(name, IFNAMSIZ,
11315 "%s-txrx-%d", tp->dev->name, irq_num);
11316 else if (tnapi->tx_buffers)
11317 snprintf(name, IFNAMSIZ,
11318 "%s-tx-%d", tp->dev->name, irq_num);
11319 else if (tnapi->rx_rcb)
11320 snprintf(name, IFNAMSIZ,
11321 "%s-rx-%d", tp->dev->name, irq_num);
11323 snprintf(name, IFNAMSIZ,
11324 "%s-%d", tp->dev->name, irq_num);
/* Defensive: guarantee NUL termination of the label buffer. */
11325 name[IFNAMSIZ-1] = 0;
11328 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11330 if (tg3_flag(tp, 1SHOT_MSI))
11331 fn = tg3_msi_1shot;
11334 fn = tg3_interrupt;
11335 if (tg3_flag(tp, TAGGED_STATUS))
11336 fn = tg3_interrupt_tagged;
11337 flags = IRQF_SHARED;
11340 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* Verify that the chip can actually deliver an interrupt: temporarily swap
 * in tg3_test_isr on vector 0, force an interrupt via host coalescing, and
 * poll for evidence of delivery (non-zero interrupt mailbox or masked PCI
 * INT).  The normal handler is restored before returning. */
11343 static int tg3_test_interrupt(struct tg3 *tp)
11345 struct tg3_napi *tnapi = &tp->napi[0];
11346 struct net_device *dev = tp->dev;
11347 int err, i, intr_ok = 0;
11350 if (!netif_running(dev))
11353 tg3_disable_ints(tp);
11355 free_irq(tnapi->irq_vec, tnapi);
11358 * Turn off MSI one shot mode. Otherwise this test has no
11359 * observable way to know whether the interrupt was delivered.
11361 if (tg3_flag(tp, 57765_PLUS)) {
11362 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11363 tw32(MSGINT_MODE, val);
11366 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11367 IRQF_SHARED, dev->name, tnapi);
11371 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11372 tg3_enable_ints(tp);
/* HOSTCC_MODE_NOW forces an immediate coalescing event / interrupt. */
11374 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Poll a few times for proof the interrupt fired. */
11377 for (i = 0; i < 5; i++) {
11378 u32 int_mbox, misc_host_ctrl;
11380 int_mbox = tr32_mailbox(tnapi->int_mbox);
11381 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11383 if ((int_mbox != 0) ||
11384 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
/* Acknowledge the tagged status so the vector isn't left pending. */
11389 if (tg3_flag(tp, 57765_PLUS) &&
11390 tnapi->hw_status->status_tag != tnapi->last_tag)
11391 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11396 tg3_disable_ints(tp);
11398 free_irq(tnapi->irq_vec, tnapi);
/* Restore the production interrupt handler. */
11400 err = tg3_request_irq(tp, 0);
11406 /* Reenable MSI one shot mode. */
11407 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11408 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11409 tw32(MSGINT_MODE, val);
11417 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11418 * successfully restored
11420 static int tg3_test_msi(struct tg3 *tp)
11425 if (!tg3_flag(tp, USING_MSI))
11428 /* Turn off SERR reporting in case MSI terminates with Master
/* SERR is masked only for the duration of the interrupt test, then the
 * saved PCI command word is restored. */
11431 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11432 pci_write_config_word(tp->pdev, PCI_COMMAND,
11433 pci_cmd & ~PCI_COMMAND_SERR);
11435 err = tg3_test_interrupt(tp);
11437 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11442 /* other failures */
11446 /* MSI test failed, go back to INTx mode */
11447 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11448 "to INTx mode. Please report this failure to the PCI "
11449 "maintainer and include system chipset information\n");
11451 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11453 pci_disable_msi(tp->pdev);
11455 tg3_flag_clear(tp, USING_MSI);
/* Fall back to the legacy line interrupt and re-request vector 0. */
11456 tp->napi[0].irq_vec = tp->pdev->irq;
11458 err = tg3_request_irq(tp, 0);
11462 /* Need to reset the chip because the MSI cycle may have terminated
11463 * with Master Abort.
11465 tg3_full_lock(tp, 1);
11467 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11468 err = tg3_init_hw(tp, true);
11470 tg3_full_unlock(tp);
/* NOTE(review): this free_irq appears to be on an error path after the
 * re-init — confirm surrounding (elided) control flow in full source. */
11473 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* Load the firmware blob named by tp->fw_needed, validate the length field
 * in its header against the blob size, and cache it in tp->fw.  On success
 * tp->fw_needed is cleared so subsequent opens skip the request. */
11478 static int tg3_request_firmware(struct tg3 *tp)
11480 const struct tg3_firmware_hdr *fw_hdr;
11482 if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11483 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11488 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11490 /* Firmware blob starts with version numbers, followed by
11491 * start address and _full_ length including BSS sections
11492 * (which must be longer than the actual data, of course
11495 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
/* The declared length (incl. BSS) must be at least the payload size;
 * anything smaller means a corrupt/bogus image, so release it. */
11496 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11497 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11498 tp->fw_len, tp->fw_needed);
11499 release_firmware(tp->fw);
11504 /* We no longer need firmware; we have it. */
11505 tp->fw_needed = NULL;
/* Compute how many interrupt vectors to request: max of configured rx/tx
 * queue counts, plus one extra vector (for link/misc) in multiqueue MSI-X
 * mode, capped at tp->irq_max. */
11509 static u32 tg3_irq_count(struct tg3 *tp)
11511 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11514 /* We want as many rx rings enabled as there are cpus.
11515 * In multiqueue MSI-X mode, the first MSI-X vector
11516 * only deals with link interrupts, etc, so we add
11517 * one to the number of vectors we are requesting.
11519 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
/* Try to switch the device to MSI-X.  Sizes rx queues from the module
 * request or the default RSS queue count (capped at rxq_max), requests the
 * vectors, and gracefully degrades if fewer vectors are granted.  Sets the
 * ENABLE_RSS / ENABLE_TSS flags when multiple queues are active.  Returns
 * true on success (boolean semantics implied by the bool return type and
 * the caller in tg3_ints_init). */
11525 static bool tg3_enable_msix(struct tg3 *tp)
11528 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11530 tp->txq_cnt = tp->txq_req;
11531 tp->rxq_cnt = tp->rxq_req;
11533 tp->rxq_cnt = netif_get_num_default_rss_queues();
11534 if (tp->rxq_cnt > tp->rxq_max)
11535 tp->rxq_cnt = tp->rxq_max;
11537 /* Disable multiple TX rings by default. Simple round-robin hardware
11538 * scheduling of the TX rings can cause starvation of rings with
11539 * small packets when other rings have TSO or jumbo packets.
11544 tp->irq_cnt = tg3_irq_count(tp);
11546 for (i = 0; i < tp->irq_max; i++) {
11547 msix_ent[i].entry = i;
11548 msix_ent[i].vector = 0;
/* May allocate anywhere between 1 and irq_cnt vectors. */
11551 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11554 } else if (rc < tp->irq_cnt) {
11555 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
/* Shrink queue counts to fit the granted vectors (one reserved). */
11558 tp->rxq_cnt = max(rc - 1, 1);
11560 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11563 for (i = 0; i < tp->irq_max; i++)
11564 tp->napi[i].irq_vec = msix_ent[i].vector;
11566 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11567 pci_disable_msix(tp->pdev);
11571 if (tp->irq_cnt == 1)
11574 tg3_flag_set(tp, ENABLE_RSS);
11576 if (tp->txq_cnt > 1)
11577 tg3_flag_set(tp, ENABLE_TSS);
11579 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
/* Decide the interrupt mechanism for this device: MSI-X if supported and
 * tg3_enable_msix() succeeds, else MSI, else legacy INTx.  MSI(-X) is only
 * honored on chips with tagged status (asserted below).  Also programs
 * MSGINT_MODE and collapses to single-queue operation when only one vector
 * is available. */
11584 static void tg3_ints_init(struct tg3 *tp)
11586 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11587 !tg3_flag(tp, TAGGED_STATUS)) {
11588 /* All MSI supporting chips should support tagged
11589 * status. Assert that this is the case.
11591 netdev_warn(tp->dev,
11592 "MSI without TAGGED_STATUS? Not using MSI\n");
11596 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11597 tg3_flag_set(tp, USING_MSIX);
11598 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11599 tg3_flag_set(tp, USING_MSI);
11601 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11602 u32 msi_mode = tr32(MSGINT_MODE);
11603 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11604 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11605 if (!tg3_flag(tp, 1SHOT_MSI))
11606 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11607 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
/* INTx / MSI: single vector on the PCI line interrupt. */
11610 if (!tg3_flag(tp, USING_MSIX)) {
11612 tp->napi[0].irq_vec = tp->pdev->irq;
11615 if (tp->irq_cnt == 1) {
11618 netif_set_real_num_tx_queues(tp->dev, 1);
11619 netif_set_real_num_rx_queues(tp->dev, 1);
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear all the
 * interrupt/queue-mode flags. */
11623 static void tg3_ints_fini(struct tg3 *tp)
11625 if (tg3_flag(tp, USING_MSIX))
11626 pci_disable_msix(tp->pdev);
11627 else if (tg3_flag(tp, USING_MSI))
11628 pci_disable_msi(tp->pdev);
11629 tg3_flag_clear(tp, USING_MSI);
11630 tg3_flag_clear(tp, USING_MSIX);
11631 tg3_flag_clear(tp, ENABLE_RSS);
11632 tg3_flag_clear(tp, ENABLE_TSS);
/* Bring the interface fully up: set up interrupts, allocate DMA-consistent
 * resources, request per-vector IRQs, initialize the hardware, optionally
 * verify MSI delivery, then start the timer and TX queues.  Error paths
 * unwind in reverse order via the labels at the bottom. */
11635 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11638 struct net_device *dev = tp->dev;
11642 * Setup interrupts first so we know how
11643 * many NAPI resources to allocate
11647 tg3_rss_check_indir_tbl(tp);
11649 /* The placement of this call is tied
11650 * to the setup and use of Host TX descriptors.
11652 err = tg3_alloc_consistent(tp);
11654 goto out_ints_fini;
11658 tg3_napi_enable(tp);
/* Request one IRQ per vector; on failure free the ones already taken. */
11660 for (i = 0; i < tp->irq_cnt; i++) {
11661 err = tg3_request_irq(tp, i);
11663 for (i--; i >= 0; i--) {
11664 struct tg3_napi *tnapi = &tp->napi[i];
11666 free_irq(tnapi->irq_vec, tnapi);
11668 goto out_napi_fini;
11672 tg3_full_lock(tp, 0);
11675 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11677 err = tg3_init_hw(tp, reset_phy);
11679 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11680 tg3_free_rings(tp);
11683 tg3_full_unlock(tp);
/* Optional MSI smoke test (skipped e.g. when resuming). */
11688 if (test_irq && tg3_flag(tp, USING_MSI)) {
11689 err = tg3_test_msi(tp);
11692 tg3_full_lock(tp, 0);
11693 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11694 tg3_free_rings(tp);
11695 tg3_full_unlock(tp);
11697 goto out_napi_fini;
11700 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11701 u32 val = tr32(PCIE_TRANSACTION_CFG);
11703 tw32(PCIE_TRANSACTION_CFG,
11704 val | PCIE_TRANS_CFG_1SHOT_MSI);
11710 tg3_hwmon_open(tp);
11712 tg3_full_lock(tp, 0);
11714 tg3_timer_start(tp);
11715 tg3_flag_set(tp, INIT_COMPLETE);
11716 tg3_enable_ints(tp);
11718 tg3_ptp_resume(tp);
11720 tg3_full_unlock(tp);
11722 netif_tx_start_all_queues(dev);
11725 * Reset loopback feature if it was turned on while the device was down
11726 * make sure that it's installed properly now.
11728 if (dev->features & NETIF_F_LOOPBACK)
11729 tg3_set_loopback(dev, dev->features);
/* Error unwind labels (names elided in this excerpt). */
11734 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11735 struct tg3_napi *tnapi = &tp->napi[i];
11736 free_irq(tnapi->irq_vec, tnapi);
11740 tg3_napi_disable(tp);
11742 tg3_free_consistent(tp);
/* Tear down the running interface: cancel the reset worker, stop the
 * datapath and timer, close hwmon, halt the chip, free rings, release all
 * IRQs, and finally free the DMA-consistent resources — the reverse of
 * tg3_start(). */
11750 static void tg3_stop(struct tg3 *tp)
11754 tg3_reset_task_cancel(tp);
11755 tg3_netif_stop(tp);
11757 tg3_timer_stop(tp);
11759 tg3_hwmon_close(tp);
11763 tg3_full_lock(tp, 1);
11765 tg3_disable_ints(tp);
11767 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11768 tg3_free_rings(tp);
11769 tg3_flag_clear(tp, INIT_COMPLETE);
11771 tg3_full_unlock(tp);
11773 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11774 struct tg3_napi *tnapi = &tp->napi[i];
11775 free_irq(tnapi->irq_vec, tnapi);
11782 tg3_free_consistent(tp);
/* net_device_ops ->ndo_open.  Refuses to open during PCI error recovery;
 * fetches any needed firmware (degrading EEE or TSO capability if the
 * request fails, restoring it if it succeeds), powers the chip up, and
 * calls tg3_start().  On start failure, aux power is frobbed and the
 * device put into D3hot. */
11785 static int tg3_open(struct net_device *dev)
11787 struct tg3 *tp = netdev_priv(dev);
11790 if (tp->pcierr_recovery) {
11791 netdev_err(dev, "Failed to open device. PCI error recovery "
11796 if (tp->fw_needed) {
11797 err = tg3_request_firmware(tp);
/* 57766: firmware is only needed for EEE — toggle the EEE capability
 * flag to match whether the blob is available. */
11798 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11800 netdev_warn(tp->dev, "EEE capability disabled\n");
11801 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11802 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11803 netdev_warn(tp->dev, "EEE capability restored\n");
11804 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
/* 5701 A0: firmware is needed for TSO — toggle TSO_CAPABLE likewise. */
11806 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11810 netdev_warn(tp->dev, "TSO capability disabled\n");
11811 tg3_flag_clear(tp, TSO_CAPABLE);
11812 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11813 netdev_notice(tp->dev, "TSO capability restored\n");
11814 tg3_flag_set(tp, TSO_CAPABLE);
11818 tg3_carrier_off(tp);
11820 err = tg3_power_up(tp);
11824 tg3_full_lock(tp, 0);
11826 tg3_disable_ints(tp);
11827 tg3_flag_clear(tp, INIT_COMPLETE);
11829 tg3_full_unlock(tp);
/* reset_phy unless the link must be kept up across power-down. */
11831 err = tg3_start(tp,
11832 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11835 tg3_frob_aux_power(tp, false);
11836 pci_set_power_state(tp->pdev, PCI_D3hot);
/* net_device_ops ->ndo_close.  Refuses to close during PCI error recovery;
 * otherwise prepares the (still present) device for power-down and drops
 * carrier. */
11842 static int tg3_close(struct net_device *dev)
11844 struct tg3 *tp = netdev_priv(dev);
11846 if (tp->pcierr_recovery) {
11847 netdev_err(dev, "Failed to close device. PCI error recovery "
/* Skip register touches if the device vanished (e.g. surprise removal). */
11854 if (pci_device_is_present(tp->pdev)) {
11855 tg3_power_down_prepare(tp);
11857 tg3_carrier_off(tp);
/* Combine a split 32-bit high/low hardware stat pair into one u64. */
11862 static inline u64 get_stat64(tg3_stat64_t *val)
11864 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * (non-SERDES) parts the count is not in the hw stats block: it is
 * read from the PHY via MII_TG3_TEST1/MII_TG3_RXR_COUNTERS and
 * accumulated in tp->phy_crc_errors.  All other chips use the
 * rx_fcs_errors hardware counter. */
11867 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11869 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11871 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11872 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11873 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11876 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
/* Enable the PHY CRC counter, then read it. */
11877 tg3_writephy(tp, MII_TG3_TEST1,
11878 val | MII_TG3_TEST1_CRC_EN)
11879 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11883 tp->phy_crc_errors += val;
11885 return tp->phy_crc_errors;
11888 return get_stat64(&hw_stats->rx_fcs_errors);
/* Fold one hardware counter into the cumulative ethtool stat:
 * saved total (old_estats) + live 64-bit hardware counter. */
11891 #define ESTAT_ADD(member) \
11892 estats->member = old_estats->member + \
11893 get_stat64(&hw_stats->member)
/* Fill *estats with cumulative ethtool statistics: each field is the
 * previously saved total in tp->estats_prev plus the current value of
 * the corresponding hardware counter. */
11895 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11897 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11898 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Receive-side counters. */
11900 ESTAT_ADD(rx_octets);
11901 ESTAT_ADD(rx_fragments);
11902 ESTAT_ADD(rx_ucast_packets);
11903 ESTAT_ADD(rx_mcast_packets);
11904 ESTAT_ADD(rx_bcast_packets);
11905 ESTAT_ADD(rx_fcs_errors);
11906 ESTAT_ADD(rx_align_errors);
11907 ESTAT_ADD(rx_xon_pause_rcvd);
11908 ESTAT_ADD(rx_xoff_pause_rcvd);
11909 ESTAT_ADD(rx_mac_ctrl_rcvd);
11910 ESTAT_ADD(rx_xoff_entered);
11911 ESTAT_ADD(rx_frame_too_long_errors);
11912 ESTAT_ADD(rx_jabbers);
11913 ESTAT_ADD(rx_undersize_packets);
11914 ESTAT_ADD(rx_in_length_errors);
11915 ESTAT_ADD(rx_out_length_errors);
11916 ESTAT_ADD(rx_64_or_less_octet_packets);
11917 ESTAT_ADD(rx_65_to_127_octet_packets);
11918 ESTAT_ADD(rx_128_to_255_octet_packets);
11919 ESTAT_ADD(rx_256_to_511_octet_packets);
11920 ESTAT_ADD(rx_512_to_1023_octet_packets);
11921 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11922 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11923 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11924 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11925 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* Transmit-side counters. */
11927 ESTAT_ADD(tx_octets);
11928 ESTAT_ADD(tx_collisions);
11929 ESTAT_ADD(tx_xon_sent);
11930 ESTAT_ADD(tx_xoff_sent);
11931 ESTAT_ADD(tx_flow_control);
11932 ESTAT_ADD(tx_mac_errors);
11933 ESTAT_ADD(tx_single_collisions);
11934 ESTAT_ADD(tx_mult_collisions);
11935 ESTAT_ADD(tx_deferred);
11936 ESTAT_ADD(tx_excessive_collisions);
11937 ESTAT_ADD(tx_late_collisions);
11938 ESTAT_ADD(tx_collide_2times);
11939 ESTAT_ADD(tx_collide_3times);
11940 ESTAT_ADD(tx_collide_4times);
11941 ESTAT_ADD(tx_collide_5times);
11942 ESTAT_ADD(tx_collide_6times);
11943 ESTAT_ADD(tx_collide_7times);
11944 ESTAT_ADD(tx_collide_8times);
11945 ESTAT_ADD(tx_collide_9times);
11946 ESTAT_ADD(tx_collide_10times);
11947 ESTAT_ADD(tx_collide_11times);
11948 ESTAT_ADD(tx_collide_12times);
11949 ESTAT_ADD(tx_collide_13times);
11950 ESTAT_ADD(tx_collide_14times);
11951 ESTAT_ADD(tx_collide_15times);
11952 ESTAT_ADD(tx_ucast_packets);
11953 ESTAT_ADD(tx_mcast_packets);
11954 ESTAT_ADD(tx_bcast_packets);
11955 ESTAT_ADD(tx_carrier_sense_errors);
11956 ESTAT_ADD(tx_discards);
11957 ESTAT_ADD(tx_errors);
/* DMA write / receive-BD engine counters. */
11959 ESTAT_ADD(dma_writeq_full);
11960 ESTAT_ADD(dma_write_prioq_full);
11961 ESTAT_ADD(rxbds_empty);
11962 ESTAT_ADD(rx_discards);
11963 ESTAT_ADD(rx_errors);
11964 ESTAT_ADD(rx_threshold_hit);
/* DMA read / send-data counters. */
11966 ESTAT_ADD(dma_readq_full);
11967 ESTAT_ADD(dma_read_prioq_full);
11968 ESTAT_ADD(tx_comp_queue_full);
/* Host coalescing / interrupt counters. */
11970 ESTAT_ADD(ring_set_send_prod_index);
11971 ESTAT_ADD(ring_status_update);
11972 ESTAT_ADD(nic_irqs);
11973 ESTAT_ADD(nic_avoided_irqs);
11974 ESTAT_ADD(nic_tx_threshold_hit);
11976 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* Fill the standard rtnl_link_stats64 from the hardware stats block,
 * adding the previously saved totals in tp->net_stats_prev so the
 * numbers are cumulative.  Per-queue rx/tx drop counts are summed
 * from the NAPI contexts at the end. */
11979 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11981 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11982 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11983 unsigned long rx_dropped;
11984 unsigned long tx_dropped;
/* Packet totals: unicast + multicast + broadcast. */
11987 stats->rx_packets = old_stats->rx_packets +
11988 get_stat64(&hw_stats->rx_ucast_packets) +
11989 get_stat64(&hw_stats->rx_mcast_packets) +
11990 get_stat64(&hw_stats->rx_bcast_packets);
11992 stats->tx_packets = old_stats->tx_packets +
11993 get_stat64(&hw_stats->tx_ucast_packets) +
11994 get_stat64(&hw_stats->tx_mcast_packets) +
11995 get_stat64(&hw_stats->tx_bcast_packets);
11997 stats->rx_bytes = old_stats->rx_bytes +
11998 get_stat64(&hw_stats->rx_octets);
11999 stats->tx_bytes = old_stats->tx_bytes +
12000 get_stat64(&hw_stats->tx_octets);
12002 stats->rx_errors = old_stats->rx_errors +
12003 get_stat64(&hw_stats->rx_errors);
/* TX error total aggregates several distinct hardware causes. */
12004 stats->tx_errors = old_stats->tx_errors +
12005 get_stat64(&hw_stats->tx_errors) +
12006 get_stat64(&hw_stats->tx_mac_errors) +
12007 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12008 get_stat64(&hw_stats->tx_discards);
12010 stats->multicast = old_stats->multicast +
12011 get_stat64(&hw_stats->rx_mcast_packets);
12012 stats->collisions = old_stats->collisions +
12013 get_stat64(&hw_stats->tx_collisions);
12015 stats->rx_length_errors = old_stats->rx_length_errors +
12016 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12017 get_stat64(&hw_stats->rx_undersize_packets);
12019 stats->rx_frame_errors = old_stats->rx_frame_errors +
12020 get_stat64(&hw_stats->rx_align_errors);
12021 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12022 get_stat64(&hw_stats->tx_discards);
12023 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12024 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701 — see helper. */
12026 stats->rx_crc_errors = old_stats->rx_crc_errors +
12027 tg3_calc_crc_errors(tp);
12029 stats->rx_missed_errors = old_stats->rx_missed_errors +
12030 get_stat64(&hw_stats->rx_discards);
12032 /* Aggregate per-queue counters. The per-queue counters are updated
12033 * by a single writer, race-free. The result computed by this loop
12034 * might not be 100% accurate (counters can be updated in the middle of
12035 * the loop) but the next tg3_get_nstats() will recompute the current
12036 * value so it is acceptable.
12038 * Note that these counters wrap around at 4G on 32bit machines.
12040 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12041 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12043 for (i = 0; i < tp->irq_cnt; i++) {
12044 struct tg3_napi *tnapi = &tp->napi[i];
12046 rx_dropped += tnapi->rx_dropped;
12047 tx_dropped += tnapi->tx_dropped;
12050 stats->rx_dropped = rx_dropped;
12051 stats->tx_dropped = tx_dropped;
/* ethtool get_regs_len: the register dump has a fixed size. */
12054 static int tg3_get_regs_len(struct net_device *dev)
12056 return TG3_REG_BLK_SIZE;
/* ethtool get_regs: zero the whole dump buffer first, then (unless
 * the PHY is in low-power mode) copy the legacy register set into it
 * under the full lock. */
12059 static void tg3_get_regs(struct net_device *dev,
12060 struct ethtool_regs *regs, void *_p)
12062 struct tg3 *tp = netdev_priv(dev);
12066 memset(_p, 0, TG3_REG_BLK_SIZE);
/* Don't touch registers while powered down. */
12068 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12071 tg3_full_lock(tp, 0);
12073 tg3_dump_legacy_regs(tp, (u32 *)_p);
12075 tg3_full_unlock(tp);
/* ethtool get_eeprom_len: size of the NVRAM as probed at init time. */
12078 static int tg3_get_eeprom_len(struct net_device *dev)
12080 struct tg3 *tp = netdev_priv(dev);
12082 return tp->nvram_size;
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data[].  NVRAM is word (4-byte) addressed, so
 * the read is split into an unaligned head, aligned middle, and
 * unaligned tail.  CPMU link-aware/idle modes and the clock are
 * overridden for the duration of the read and restored afterwards. */
12085 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12087 struct tg3 *tp = netdev_priv(dev);
12088 int ret, cpmu_restore = 0;
12090 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12093 if (tg3_flag(tp, NO_NVRAM))
12096 offset = eeprom->offset;
12100 eeprom->magic = TG3_EEPROM_MAGIC;
12102 /* Override clock, link aware and link idle modes */
12103 if (tg3_flag(tp, CPMU_PRESENT)) {
12104 cpmu_val = tr32(TG3_CPMU_CTRL);
12105 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12106 CPMU_CTRL_LINK_IDLE_MODE)) {
12107 tw32(TG3_CPMU_CTRL, cpmu_val &
12108 ~(CPMU_CTRL_LINK_AWARE_MODE |
12109 CPMU_CTRL_LINK_IDLE_MODE));
12113 tg3_override_clk(tp);
12116 /* adjustments to start on required 4 byte boundary */
12117 b_offset = offset & 3;
12118 b_count = 4 - b_offset;
12119 if (b_count > len) {
12120 /* i.e. offset=1 len=2 */
12123 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12126 memcpy(data, ((char *)&val) + b_offset, b_count);
12129 eeprom->len += b_count;
12132 /* read bytes up to the last 4 byte boundary */
12133 pd = &data[eeprom->len];
12134 for (i = 0; i < (len - (len & 3)); i += 4) {
12135 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12142 memcpy(pd + i, &val, 4);
/* Long reads: yield the CPU and honor pending signals. */
12143 if (need_resched()) {
12144 if (signal_pending(current)) {
12155 /* read last bytes not ending on 4 byte boundary */
12156 pd = &data[eeprom->len];
12158 b_offset = offset + len - b_count;
12159 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12162 memcpy(pd, &val, b_count);
12163 eeprom->len += b_count;
12168 /* Restore clock, link aware and link idle modes */
12169 tg3_restore_clk(tp);
12171 tw32(TG3_CPMU_CTRL, cpmu_val);
/* ethtool set_eeprom: write eeprom->len bytes at eeprom->offset.
 * NVRAM is word-addressed, so when the range is unaligned the
 * bordering words are read back first and merged into a bounce
 * buffer (read-modify-write), then the whole aligned range is
 * written with tg3_nvram_write_block(). */
12176 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12178 struct tg3 *tp = netdev_priv(dev);
12180 u32 offset, len, b_offset, odd_len;
12182 __be32 start = 0, end;
/* Caller must present the driver's magic to prove intent. */
12184 if (tg3_flag(tp, NO_NVRAM) ||
12185 eeprom->magic != TG3_EEPROM_MAGIC)
12188 offset = eeprom->offset;
12191 if ((b_offset = (offset & 3))) {
12192 /* adjustments to start on required 4 byte boundary */
12193 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12204 /* adjustments to end on required 4 byte boundary */
12206 len = (len + 3) & ~3;
12207 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
/* Unaligned head or tail: build the merged image in a kmalloc
 * bounce buffer before writing. */
12213 if (b_offset || odd_len) {
12214 buf = kmalloc(len, GFP_KERNEL);
12218 memcpy(buf, &start, 4);
12220 memcpy(buf+len-4, &end, 4);
12221 memcpy(buf + b_offset, data, eeprom->len);
12224 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_link_ksettings: when phylib manages the PHY, delegate
 * entirely; otherwise synthesize supported/advertised masks and the
 * current speed/duplex/MDI-X state from the driver's link_config. */
12232 static int tg3_get_link_ksettings(struct net_device *dev,
12233 struct ethtool_link_ksettings *cmd)
12235 struct tg3 *tp = netdev_priv(dev);
12236 u32 supported, advertising;
12238 if (tg3_flag(tp, USE_PHYLIB)) {
12239 struct phy_device *phydev;
12240 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12242 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12243 phy_ethtool_ksettings_get(phydev, cmd);
/* Build the supported-modes mask from phy capability flags. */
12248 supported = (SUPPORTED_Autoneg);
12250 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12251 supported |= (SUPPORTED_1000baseT_Half |
12252 SUPPORTED_1000baseT_Full);
/* Copper ports advertise TP modes; SERDES ports are fibre. */
12254 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12255 supported |= (SUPPORTED_100baseT_Half |
12256 SUPPORTED_100baseT_Full |
12257 SUPPORTED_10baseT_Half |
12258 SUPPORTED_10baseT_Full |
12260 cmd->base.port = PORT_TP;
12262 supported |= SUPPORTED_FIBRE;
12263 cmd->base.port = PORT_FIBRE;
12265 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
/* Reflect the configured flow-control policy in the advertised
 * pause bits when pause autonegotiation is on. */
12268 advertising = tp->link_config.advertising;
12269 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12270 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12271 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12272 advertising |= ADVERTISED_Pause;
12274 advertising |= ADVERTISED_Pause |
12275 ADVERTISED_Asym_Pause;
12277 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12278 advertising |= ADVERTISED_Asym_Pause;
12281 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
/* Live link: report negotiated speed/duplex/partner/MDI-X.
 * Otherwise everything is unknown. */
12284 if (netif_running(dev) && tp->link_up) {
12285 cmd->base.speed = tp->link_config.active_speed;
12286 cmd->base.duplex = tp->link_config.active_duplex;
12287 ethtool_convert_legacy_u32_to_link_mode(
12288 cmd->link_modes.lp_advertising,
12289 tp->link_config.rmt_adv);
12291 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12292 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12293 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12295 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12298 cmd->base.speed = SPEED_UNKNOWN;
12299 cmd->base.duplex = DUPLEX_UNKNOWN;
12300 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12302 cmd->base.phy_address = tp->phy_addr;
12303 cmd->base.autoneg = tp->link_config.autoneg;
/* ethtool set_link_ksettings: delegate to phylib when it manages the
 * PHY; otherwise validate the request against what the chip can do,
 * update link_config under the full lock, and re-run PHY setup if
 * the interface is up. */
12307 static int tg3_set_link_ksettings(struct net_device *dev,
12308 const struct ethtool_link_ksettings *cmd)
12310 struct tg3 *tp = netdev_priv(dev);
12311 u32 speed = cmd->base.speed;
12314 if (tg3_flag(tp, USE_PHYLIB)) {
12315 struct phy_device *phydev;
12316 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12318 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12319 return phy_ethtool_ksettings_set(phydev, cmd);
/* Only explicit enable/disable of autoneg is meaningful. */
12322 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12323 cmd->base.autoneg != AUTONEG_DISABLE)
12326 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12327 cmd->base.duplex != DUPLEX_FULL &&
12328 cmd->base.duplex != DUPLEX_HALF)
12331 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12332 cmd->link_modes.advertising);
/* Autoneg: reject any advertised mode the hardware can't do,
 * then clip the request to the capability mask. */
12334 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12335 u32 mask = ADVERTISED_Autoneg |
12337 ADVERTISED_Asym_Pause;
12339 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12340 mask |= ADVERTISED_1000baseT_Half |
12341 ADVERTISED_1000baseT_Full;
12343 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12344 mask |= ADVERTISED_100baseT_Half |
12345 ADVERTISED_100baseT_Full |
12346 ADVERTISED_10baseT_Half |
12347 ADVERTISED_10baseT_Full |
12350 mask |= ADVERTISED_FIBRE;
12352 if (advertising & ~mask)
12355 mask &= (ADVERTISED_1000baseT_Half |
12356 ADVERTISED_1000baseT_Full |
12357 ADVERTISED_100baseT_Half |
12358 ADVERTISED_100baseT_Full |
12359 ADVERTISED_10baseT_Half |
12360 ADVERTISED_10baseT_Full);
12362 advertising &= mask;
/* Forced mode: SERDES links only support 1000/full. */
12364 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12365 if (speed != SPEED_1000)
12368 if (cmd->base.duplex != DUPLEX_FULL)
12371 if (speed != SPEED_100 &&
12377 tg3_full_lock(tp, 0);
12379 tp->link_config.autoneg = cmd->base.autoneg;
12380 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12381 tp->link_config.advertising = (advertising |
12382 ADVERTISED_Autoneg);
12383 tp->link_config.speed = SPEED_UNKNOWN;
12384 tp->link_config.duplex = DUPLEX_UNKNOWN;
12386 tp->link_config.advertising = 0;
12387 tp->link_config.speed = speed;
12388 tp->link_config.duplex = cmd->base.duplex;
/* Remember that the user explicitly configured the link. */
12391 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12393 tg3_warn_mgmt_link_flap(tp);
12395 if (netif_running(dev))
12396 tg3_setup_phy(tp, true);
12398 tg3_full_unlock(tp);
/* ethtool get_drvinfo: report driver name, firmware version and the
 * PCI bus address of the device. */
12403 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12405 struct tg3 *tp = netdev_priv(dev);
12407 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12408 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12409 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
/* ethtool get_wol: magic-packet wake is the only supported WoL mode,
 * and only when both the chip (WOL_CAP) and the platform
 * (device_can_wakeup) allow it. */
12412 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12414 struct tg3 *tp = netdev_priv(dev);
12416 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12417 wol->supported = WAKE_MAGIC;
12419 wol->supported = 0;
12421 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12422 wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support. */
12423 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC (or nothing), propagate the
 * choice to the PM core via device_set_wakeup_enable(), and keep the
 * driver's WOL_ENABLE flag in sync with the resulting wakeup state. */
12426 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12428 struct tg3 *tp = netdev_priv(dev);
12429 struct device *dp = &tp->pdev->dev;
12431 if (wol->wolopts & ~WAKE_MAGIC)
12433 if ((wol->wolopts & WAKE_MAGIC) &&
12434 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12437 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12439 if (device_may_wakeup(dp))
12440 tg3_flag_set(tp, WOL_ENABLE);
12442 tg3_flag_clear(tp, WOL_ENABLE);
/* ethtool get_msglevel: return the current netif message-enable mask. */
12447 static u32 tg3_get_msglevel(struct net_device *dev)
12449 struct tg3 *tp = netdev_priv(dev);
12450 return tp->msg_enable;
/* ethtool set_msglevel: store the new netif message-enable mask. */
12453 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12455 struct tg3 *tp = netdev_priv(dev);
12456 tp->msg_enable = value;
/* ethtool nway_reset: restart autonegotiation.  Not valid when the
 * interface is down or on PHY-SERDES links.  With phylib, delegate to
 * phy_start_aneg(); otherwise poke BMCR directly under tp->lock. */
12459 static int tg3_nway_reset(struct net_device *dev)
12461 struct tg3 *tp = netdev_priv(dev);
12464 if (!netif_running(dev))
12467 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12470 tg3_warn_mgmt_link_flap(tp);
12472 if (tg3_flag(tp, USE_PHYLIB)) {
12473 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12475 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12479 spin_lock_bh(&tp->lock);
/* BMCR is read twice; the first read presumably flushes latched
 * bits before the value is tested — TODO confirm vs. PHY errata. */
12481 tg3_readphy(tp, MII_BMCR, &bmcr);
12482 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12483 ((bmcr & BMCR_ANENABLE) ||
12484 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12485 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12489 spin_unlock_bh(&tp->lock);
/* ethtool get_ringparam: report ring limits and current sizes.
 * Jumbo values are only meaningful when the jumbo ring is enabled;
 * the TX size is taken from queue 0 (all queues share one setting). */
12495 static void tg3_get_ringparam(struct net_device *dev,
12496 struct ethtool_ringparam *ering,
12497 struct kernel_ethtool_ringparam *kernel_ering,
12498 struct netlink_ext_ack *extack)
12500 struct tg3 *tp = netdev_priv(dev);
12502 ering->rx_max_pending = tp->rx_std_ring_mask;
12503 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12504 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12506 ering->rx_jumbo_max_pending = 0;
12508 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12510 ering->rx_pending = tp->rx_pending;
12511 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12512 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12514 ering->rx_jumbo_pending = 0;
12516 ering->tx_pending = tp->napi[0].tx_pending;
/* ethtool set_ringparam: validate the requested sizes, stop the
 * device if running, apply the new sizes (TX size is replicated to
 * every queue), then halt/restart the hardware.  On 5717/5719/5720
 * the PHY is also reset across the restart to avoid a PHY lockup. */
12519 static int tg3_set_ringparam(struct net_device *dev,
12520 struct ethtool_ringparam *ering,
12521 struct kernel_ethtool_ringparam *kernel_ering,
12522 struct netlink_ext_ack *extack)
12524 struct tg3 *tp = netdev_priv(dev);
12525 int i, irq_sync = 0, err = 0;
12526 bool reset_phy = false;
/* TX must exceed MAX_SKB_FRAGS (x3 on TSO_BUG chips) so a maximally
 * fragmented skb always fits in the ring. */
12528 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12529 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12530 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12531 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12532 (tg3_flag(tp, TSO_BUG) &&
12533 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12536 if (netif_running(dev)) {
12538 tg3_netif_stop(tp);
12542 tg3_full_lock(tp, irq_sync);
12544 tp->rx_pending = ering->rx_pending;
/* Some chips cap the standard RX ring at 64 entries. */
12546 if (tg3_flag(tp, MAX_RXPEND_64) &&
12547 tp->rx_pending > 63)
12548 tp->rx_pending = 63;
12550 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12551 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12553 for (i = 0; i < tp->irq_max; i++)
12554 tp->napi[i].tx_pending = ering->tx_pending;
12556 if (netif_running(dev)) {
12557 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12558 /* Reset PHY to avoid PHY lock up */
12559 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12560 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12561 tg3_asic_rev(tp) == ASIC_REV_5720)
12564 err = tg3_restart_hw(tp, reset_phy);
12566 tg3_netif_start(tp);
12569 tg3_full_unlock(tp);
12571 if (irq_sync && !err)
/* ethtool get_pauseparam: report pause autoneg and the configured
 * RX/TX flow-control directions. */
12577 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12579 struct tg3 *tp = netdev_priv(dev);
12581 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12583 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12584 epause->rx_pause = 1;
12586 epause->rx_pause = 0;
12588 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12589 epause->tx_pause = 1;
12591 epause->tx_pause = 0;
/* ethtool set_pauseparam: update flow-control configuration.
 * phylib path: validate, push the pause bits to the PHY and let
 * tg3_adjust_link()/tg3_setup_flow_control() finish the job.
 * Legacy path: stop the device if running, rewrite the flags under
 * the full lock, then halt and restart the hardware (with a PHY
 * reset on 5717/5719/5720 to avoid a PHY lockup). */
12594 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12596 struct tg3 *tp = netdev_priv(dev);
12598 bool reset_phy = false;
12600 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12601 tg3_warn_mgmt_link_flap(tp);
12603 if (tg3_flag(tp, USE_PHYLIB)) {
12604 struct phy_device *phydev;
12606 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12608 if (!phy_validate_pause(phydev, epause))
12611 tp->link_config.flowctrl = 0;
12612 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12613 if (epause->rx_pause) {
12614 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12616 if (epause->tx_pause) {
12617 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12619 } else if (epause->tx_pause) {
12620 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12623 if (epause->autoneg)
12624 tg3_flag_set(tp, PAUSE_AUTONEG);
12626 tg3_flag_clear(tp, PAUSE_AUTONEG);
12628 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12629 if (phydev->autoneg) {
12630 /* phy_set_asym_pause() will
12631 * renegotiate the link to inform our
12632 * link partner of our flow control
12633 * settings, even if the flow control
12634 * is forced. Let tg3_adjust_link()
12635 * do the final flow control setup.
12640 if (!epause->autoneg)
12641 tg3_setup_flow_control(tp, 0, 0);
/* Legacy (non-phylib) path below. */
12646 if (netif_running(dev)) {
12647 tg3_netif_stop(tp);
12651 tg3_full_lock(tp, irq_sync);
12653 if (epause->autoneg)
12654 tg3_flag_set(tp, PAUSE_AUTONEG);
12656 tg3_flag_clear(tp, PAUSE_AUTONEG);
12657 if (epause->rx_pause)
12658 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12660 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12661 if (epause->tx_pause)
12662 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12664 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12666 if (netif_running(dev)) {
12667 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12668 /* Reset PHY to avoid PHY lock up */
12669 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12670 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12671 tg3_asic_rev(tp) == ASIC_REV_5720)
12674 err = tg3_restart_hw(tp, reset_phy);
12676 tg3_netif_start(tp);
12679 tg3_full_unlock(tp);
/* Remember the user explicitly configured flow control. */
12682 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
/* ethtool get_sset_count: number of self-test or statistics strings. */
12687 static int tg3_get_sset_count(struct net_device *dev, int sset)
12691 return TG3_NUM_TEST;
12693 return TG3_NUM_STATS;
12695 return -EOPNOTSUPP;
/* ethtool get_rxnfc: only ETHTOOL_GRXRINGS is supported, and only on
 * MSI-X capable parts.  Report the active RX queue count when
 * running, otherwise the count that would be used (online CPUs,
 * capped at TG3_RSS_MAX_NUM_QS). */
12699 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12700 u32 *rules __always_unused)
12702 struct tg3 *tp = netdev_priv(dev);
12704 if (!tg3_flag(tp, SUPPORT_MSIX))
12705 return -EOPNOTSUPP;
12707 switch (info->cmd) {
12708 case ETHTOOL_GRXRINGS:
12709 if (netif_running(tp->dev))
12710 info->data = tp->rxq_cnt;
12712 info->data = num_online_cpus();
12713 if (info->data > TG3_RSS_MAX_NUM_QS)
12714 info->data = TG3_RSS_MAX_NUM_QS;
12720 return -EOPNOTSUPP;
/* ethtool get_rxfh_indir_size: RSS indirection table size, non-zero
 * only on MSI-X capable parts. */
12724 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12727 struct tg3 *tp = netdev_priv(dev);
12729 if (tg3_flag(tp, SUPPORT_MSIX))
12730 size = TG3_RSS_INDIR_TBL_SIZE;
/* ethtool get_rxfh: report Toeplitz hashing and copy out the current
 * RSS indirection table. */
12735 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12737 struct tg3 *tp = netdev_priv(dev);
12740 rxfh->hfunc = ETH_RSS_HASH_TOP;
12744 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12745 rxfh->indir[i] = tp->rss_ind_tbl[i];
/* ethtool set_rxfh: accept a new RSS indirection table (hash function
 * must stay Toeplitz / unchanged).  The table can be written to the
 * hardware while the device is running. */
12750 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12751 struct netlink_ext_ack *extack)
12753 struct tg3 *tp = netdev_priv(dev);
12756 /* We require at least one supported parameter to be changed and no
12757 * change in any of the unsupported parameters
12760 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12761 rxfh->hfunc != ETH_RSS_HASH_TOP))
12762 return -EOPNOTSUPP;
12767 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12768 tp->rss_ind_tbl[i] = rxfh->indir[i];
12770 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12773 /* It is legal to write the indirection
12774 * table while the device is running.
12776 tg3_full_lock(tp, 0);
12777 tg3_rss_write_indir_tbl(tp);
12778 tg3_full_unlock(tp);
/* ethtool get_channels: report RX/TX queue limits and the counts in
 * use (when running) or the counts that would be used: the user's
 * request if set, else the kernel's default RSS queue count capped
 * at the hardware maximum. */
12783 static void tg3_get_channels(struct net_device *dev,
12784 struct ethtool_channels *channel)
12786 struct tg3 *tp = netdev_priv(dev);
12787 u32 deflt_qs = netif_get_num_default_rss_queues();
12789 channel->max_rx = tp->rxq_max;
12790 channel->max_tx = tp->txq_max;
12792 if (netif_running(dev)) {
12793 channel->rx_count = tp->rxq_cnt;
12794 channel->tx_count = tp->txq_cnt;
12797 channel->rx_count = tp->rxq_req;
12799 channel->rx_count = min(deflt_qs, tp->rxq_max);
12802 channel->tx_count = tp->txq_req;
12804 channel->tx_count = min(deflt_qs, tp->txq_max);
/* ethtool set_channels: record the requested RX/TX queue counts
 * (MSI-X parts only, within hardware limits); if the device is
 * running, restart it so the new queue layout takes effect. */
12808 static int tg3_set_channels(struct net_device *dev,
12809 struct ethtool_channels *channel)
12811 struct tg3 *tp = netdev_priv(dev);
12813 if (!tg3_flag(tp, SUPPORT_MSIX))
12814 return -EOPNOTSUPP;
12816 if (channel->rx_count > tp->rxq_max ||
12817 channel->tx_count > tp->txq_max)
12820 tp->rxq_req = channel->rx_count;
12821 tp->txq_req = channel->tx_count;
/* Not running: the new counts apply at the next open. */
12823 if (!netif_running(dev))
12828 tg3_carrier_off(tp);
12830 tg3_start(tp, true, false, false);
/* ethtool get_strings: copy the statistics or self-test key-name
 * table into buf for the requested string set. */
12835 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12837 switch (stringset) {
12839 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12842 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12845 WARN_ON(1); /* we need a WARN() */
/* ethtool set_phys_id: blink the port LEDs for identification.
 * ACTIVE returns 1 so the core cycles ON/OFF once per second;
 * ON forces all link/traffic LEDs lit, OFF forces them dark, and
 * INACTIVE restores the saved LED control value. */
12850 static int tg3_set_phys_id(struct net_device *dev,
12851 enum ethtool_phys_id_state state)
12853 struct tg3 *tp = netdev_priv(dev);
12856 case ETHTOOL_ID_ACTIVE:
12857 return 1; /* cycle on/off once per second */
12859 case ETHTOOL_ID_ON:
12860 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12861 LED_CTRL_1000MBPS_ON |
12862 LED_CTRL_100MBPS_ON |
12863 LED_CTRL_10MBPS_ON |
12864 LED_CTRL_TRAFFIC_OVERRIDE |
12865 LED_CTRL_TRAFFIC_BLINK |
12866 LED_CTRL_TRAFFIC_LED);
12869 case ETHTOOL_ID_OFF:
12870 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12871 LED_CTRL_TRAFFIC_OVERRIDE);
12874 case ETHTOOL_ID_INACTIVE:
12875 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool get_ethtool_stats: fill the cumulative stats, or zero the
 * buffer (presumably when the hw stats block is unavailable — the
 * branch condition is elided in this extract; TODO confirm). */
12882 static void tg3_get_ethtool_stats(struct net_device *dev,
12883 struct ethtool_stats *estats, u64 *tmp_stats)
12885 struct tg3 *tp = netdev_priv(dev);
12888 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12890 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
/* Read the VPD block into a freshly kmalloc'd buffer and return it
 * (caller frees); *vpdlen receives its length.  For EEPROM-magic
 * NVRAM the extended-VPD directory entry is searched first, falling
 * back to the fixed TG3_NVM_VPD_OFF/LEN region; otherwise the block
 * is fetched through the PCI core's pci_vpd_alloc(). */
12893 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12897 u32 offset = 0, len = 0;
12900 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12903 if (magic == TG3_EEPROM_MAGIC) {
/* Walk the NVRAM directory looking for an extended-VPD entry. */
12904 for (offset = TG3_NVM_DIR_START;
12905 offset < TG3_NVM_DIR_END;
12906 offset += TG3_NVM_DIRENT_SIZE) {
12907 if (tg3_nvram_read(tp, offset, &val))
12910 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12911 TG3_NVM_DIRTYPE_EXTVPD)
12915 if (offset != TG3_NVM_DIR_END) {
12916 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12917 if (tg3_nvram_read(tp, offset + 4, &offset))
12920 offset = tg3_nvram_logical_addr(tp, offset);
/* No extended VPD found: use the fixed VPD region. */
12923 if (!offset || !len) {
12924 offset = TG3_NVM_VPD_OFF;
12925 len = TG3_NVM_VPD_LEN;
12928 buf = kmalloc(len, GFP_KERNEL);
12932 for (i = 0; i < len; i += 4) {
12933 /* The data is in little-endian format in NVRAM.
12934 * Use the big-endian read routines to preserve
12935 * the byte order as it exists in NVRAM.
12937 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12942 buf = pci_vpd_alloc(tp->pdev, vpdlen);
/* NVRAM region sizes checked by tg3_test_nvram(), by image format. */
12954 #define NVRAM_TEST_SIZE 0x100
12955 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12956 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12957 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12958 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12959 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12960 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12961 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12962 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Ethtool self-test: validate the NVRAM image.  Determine the image
 * format from the magic word, read the relevant region, then verify
 * the format-specific integrity data: an 8-bit checksum for selfboot
 * FW images, per-byte parity bits for selfboot HW images, CRCs for
 * the legacy layout, and finally the VPD block's checksum. */
12964 static int tg3_test_nvram(struct tg3 *tp)
12968 int i, j, k, err = 0, size;
12971 if (tg3_flag(tp, NO_NVRAM))
12974 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Pick the region size that matches the image format/revision. */
12977 if (magic == TG3_EEPROM_MAGIC)
12978 size = NVRAM_TEST_SIZE;
12979 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12980 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12981 TG3_EEPROM_SB_FORMAT_1) {
12982 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12983 case TG3_EEPROM_SB_REVISION_0:
12984 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12986 case TG3_EEPROM_SB_REVISION_2:
12987 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12989 case TG3_EEPROM_SB_REVISION_3:
12990 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12992 case TG3_EEPROM_SB_REVISION_4:
12993 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12995 case TG3_EEPROM_SB_REVISION_5:
12996 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12998 case TG3_EEPROM_SB_REVISION_6:
12999 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13006 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13007 size = NVRAM_SELFBOOT_HW_SIZE;
13011 buf = kmalloc(size, GFP_KERNEL);
/* Read the whole region as big-endian words. */
13016 for (i = 0, j = 0; i < size; i += 4, j++) {
13017 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13024 /* Selfboot format */
13025 magic = be32_to_cpu(buf[0]);
13026 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13027 TG3_EEPROM_MAGIC_FW) {
13028 u8 *buf8 = (u8 *) buf, csum8 = 0;
13030 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13031 TG3_EEPROM_SB_REVISION_2) {
13032 /* For rev 2, the csum doesn't include the MBA. */
13033 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13035 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13038 for (i = 0; i < size; i++)
/* Selfboot HW format: each data byte carries a parity bit. */
13051 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13052 TG3_EEPROM_MAGIC_HW) {
13053 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13054 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13055 u8 *buf8 = (u8 *) buf;
13057 /* Separate the parity bits and the data bytes. */
13058 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13059 if ((i == 0) || (i == 8)) {
13063 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13064 parity[k++] = buf8[i] & msk;
13066 } else if (i == 16) {
13070 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13071 parity[k++] = buf8[i] & msk;
13074 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13075 parity[k++] = buf8[i] & msk;
13078 data[j++] = buf8[i];
/* Each data byte's population-count parity must match its bit. */
13082 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13083 u8 hw8 = hweight8(data[i]);
13085 if ((hw8 & 0x1) && parity[i])
13087 else if (!(hw8 & 0x1) && !parity[i])
13096 /* Bootstrap checksum at offset 0x10 */
13097 csum = calc_crc((unsigned char *) buf, 0x10);
13098 if (csum != le32_to_cpu(buf[0x10/4]))
13101 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13102 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13103 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally, validate the VPD block's checksum. */
13108 buf = tg3_vpd_readblock(tp, &len);
13112 err = pci_vpd_check_csum(buf, len);
13113 /* go on if no checksum found */
/* Link self-test timeouts in seconds: SERDES links settle faster
 * than copper. */
13121 #define TG3_SERDES_TIMEOUT_SEC 2
13122 #define TG3_COPPER_TIMEOUT_SEC 6
/* Ethtool self-test: poll once per second (interruptibly) until the
 * link comes up or the media-specific timeout expires.  Only valid
 * while the interface is running. */
13124 static int tg3_test_link(struct tg3 *tp)
13128 if (!netif_running(tp->dev))
13131 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13132 max = TG3_SERDES_TIMEOUT_SEC;
13134 max = TG3_COPPER_TIMEOUT_SEC;
13136 for (i = 0; i < max; i++) {
13140 if (msleep_interruptible(1000))
13147 /* Only test the commonly used registers */
13148 static int tg3_test_registers(struct tg3 *tp)
13150 int i, is_5705, is_5750;
13151 u32 offset, read_mask, write_mask, val, save_val, read_val;
13155 #define TG3_FL_5705 0x1
13156 #define TG3_FL_NOT_5705 0x2
13157 #define TG3_FL_NOT_5788 0x4
13158 #define TG3_FL_NOT_5750 0x8
13162 /* MAC Control Registers */
13163 { MAC_MODE, TG3_FL_NOT_5705,
13164 0x00000000, 0x00ef6f8c },
13165 { MAC_MODE, TG3_FL_5705,
13166 0x00000000, 0x01ef6b8c },
13167 { MAC_STATUS, TG3_FL_NOT_5705,
13168 0x03800107, 0x00000000 },
13169 { MAC_STATUS, TG3_FL_5705,
13170 0x03800100, 0x00000000 },
13171 { MAC_ADDR_0_HIGH, 0x0000,
13172 0x00000000, 0x0000ffff },
13173 { MAC_ADDR_0_LOW, 0x0000,
13174 0x00000000, 0xffffffff },
13175 { MAC_RX_MTU_SIZE, 0x0000,
13176 0x00000000, 0x0000ffff },
13177 { MAC_TX_MODE, 0x0000,
13178 0x00000000, 0x00000070 },
13179 { MAC_TX_LENGTHS, 0x0000,
13180 0x00000000, 0x00003fff },
13181 { MAC_RX_MODE, TG3_FL_NOT_5705,
13182 0x00000000, 0x000007fc },
13183 { MAC_RX_MODE, TG3_FL_5705,
13184 0x00000000, 0x000007dc },
13185 { MAC_HASH_REG_0, 0x0000,
13186 0x00000000, 0xffffffff },
13187 { MAC_HASH_REG_1, 0x0000,
13188 0x00000000, 0xffffffff },
13189 { MAC_HASH_REG_2, 0x0000,
13190 0x00000000, 0xffffffff },
13191 { MAC_HASH_REG_3, 0x0000,
13192 0x00000000, 0xffffffff },
13194 /* Receive Data and Receive BD Initiator Control Registers. */
13195 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13196 0x00000000, 0xffffffff },
13197 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13198 0x00000000, 0xffffffff },
13199 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13200 0x00000000, 0x00000003 },
13201 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13202 0x00000000, 0xffffffff },
13203 { RCVDBDI_STD_BD+0, 0x0000,
13204 0x00000000, 0xffffffff },
13205 { RCVDBDI_STD_BD+4, 0x0000,
13206 0x00000000, 0xffffffff },
13207 { RCVDBDI_STD_BD+8, 0x0000,
13208 0x00000000, 0xffff0002 },
13209 { RCVDBDI_STD_BD+0xc, 0x0000,
13210 0x00000000, 0xffffffff },
13212 /* Receive BD Initiator Control Registers. */
13213 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13214 0x00000000, 0xffffffff },
13215 { RCVBDI_STD_THRESH, TG3_FL_5705,
13216 0x00000000, 0x000003ff },
13217 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13218 0x00000000, 0xffffffff },
13220 /* Host Coalescing Control Registers. */
13221 { HOSTCC_MODE, TG3_FL_NOT_5705,
13222 0x00000000, 0x00000004 },
13223 { HOSTCC_MODE, TG3_FL_5705,
13224 0x00000000, 0x000000f6 },
13225 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13226 0x00000000, 0xffffffff },
13227 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13228 0x00000000, 0x000003ff },
13229 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13230 0x00000000, 0xffffffff },
13231 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13232 0x00000000, 0x000003ff },
13233 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13234 0x00000000, 0xffffffff },
13235 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13236 0x00000000, 0x000000ff },
13237 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13238 0x00000000, 0xffffffff },
13239 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13240 0x00000000, 0x000000ff },
13241 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13242 0x00000000, 0xffffffff },
13243 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13244 0x00000000, 0xffffffff },
13245 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13246 0x00000000, 0xffffffff },
13247 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13248 0x00000000, 0x000000ff },
13249 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13250 0x00000000, 0xffffffff },
13251 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13252 0x00000000, 0x000000ff },
13253 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13254 0x00000000, 0xffffffff },
13255 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13256 0x00000000, 0xffffffff },
13257 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13258 0x00000000, 0xffffffff },
13259 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13260 0x00000000, 0xffffffff },
13261 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13262 0x00000000, 0xffffffff },
13263 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13264 0xffffffff, 0x00000000 },
13265 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13266 0xffffffff, 0x00000000 },
13268 /* Buffer Manager Control Registers. */
13269 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13270 0x00000000, 0x007fff80 },
13271 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13272 0x00000000, 0x007fffff },
13273 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13274 0x00000000, 0x0000003f },
13275 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13276 0x00000000, 0x000001ff },
13277 { BUFMGR_MB_HIGH_WATER, 0x0000,
13278 0x00000000, 0x000001ff },
13279 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13280 0xffffffff, 0x00000000 },
13281 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13282 0xffffffff, 0x00000000 },
13284 /* Mailbox Registers */
13285 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13286 0x00000000, 0x000001ff },
13287 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13288 0x00000000, 0x000001ff },
13289 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13290 0x00000000, 0x000007ff },
13291 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13292 0x00000000, 0x000001ff },
13294 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13297 is_5705 = is_5750 = 0;
13298 if (tg3_flag(tp, 5705_PLUS)) {
13300 if (tg3_flag(tp, 5750_PLUS))
13304 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13305 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13308 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13311 if (tg3_flag(tp, IS_5788) &&
13312 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13315 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13318 offset = (u32) reg_tbl[i].offset;
13319 read_mask = reg_tbl[i].read_mask;
13320 write_mask = reg_tbl[i].write_mask;
13322 /* Save the original register content */
13323 save_val = tr32(offset);
13325 /* Determine the read-only value. */
13326 read_val = save_val & read_mask;
13328 /* Write zero to the register, then make sure the read-only bits
13329 * are not changed and the read/write bits are all zeros.
13333 val = tr32(offset);
13335 /* Test the read-only and read/write bits. */
13336 if (((val & read_mask) != read_val) || (val & write_mask))
13339 /* Write ones to all the bits defined by RdMask and WrMask, then
13340 * make sure the read-only bits are not changed and the
13341 * read/write bits are all ones.
13343 tw32(offset, read_mask | write_mask);
13345 val = tr32(offset);
13347 /* Test the read-only bits. */
13348 if ((val & read_mask) != read_val)
13351 /* Test the read/write bits. */
13352 if ((val & write_mask) != write_mask)
13355 tw32(offset, save_val);
13361 if (netif_msg_hw(tp))
13362 netdev_err(tp->dev,
13363 "Register test failed at offset %x\n", offset);
13364 tw32(offset, save_val);
/* tg3_do_mem_test() - pattern-test a window of on-chip memory.
 * Writes every word in [offset, offset + len) with each entry of
 * test_pattern via tg3_write_mem(), reads it back with tg3_read_mem(),
 * and fails on the first mismatch.
 * NOTE(review): this extract omits some original lines (local variable
 * declarations and the return paths); only the visible logic is documented.
 */
13368 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
/* Patterns: all-zeros, all-ones, alternating-bit pattern. */
13370 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13374 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
/* Step one 32-bit word at a time across the region. */
13375 for (j = 0; j < len; j += 4) {
13378 tg3_write_mem(tp, offset + j, test_pattern[i]);
13379 tg3_read_mem(tp, offset + j, &val);
13380 if (val != test_pattern[i])
/* tg3_test_memory() - ethtool offline memory self-test.
 * Selects a per-ASIC-family table of {offset, len} on-chip memory regions
 * and runs tg3_do_mem_test() on each.  Every table is terminated by a
 * sentinel entry with offset 0xffffffff.
 * NOTE(review): this extract omits some original lines (struct member
 * declarations, the final `else`, loop body braces/returns).
 */
13387 static int tg3_test_memory(struct tg3 *tp)
13389 static struct mem_entry {
13392 } mem_tbl_570x[] = {
13393 { 0x00000000, 0x00b50},
13394 { 0x00002000, 0x1c000},
13395 { 0xffffffff, 0x00000}
13396 }, mem_tbl_5705[] = {
13397 { 0x00000100, 0x0000c},
13398 { 0x00000200, 0x00008},
13399 { 0x00004000, 0x00800},
13400 { 0x00006000, 0x01000},
13401 { 0x00008000, 0x02000},
13402 { 0x00010000, 0x0e000},
13403 { 0xffffffff, 0x00000}
13404 }, mem_tbl_5755[] = {
13405 { 0x00000200, 0x00008},
13406 { 0x00004000, 0x00800},
13407 { 0x00006000, 0x00800},
13408 { 0x00008000, 0x02000},
13409 { 0x00010000, 0x0c000},
13410 { 0xffffffff, 0x00000}
13411 }, mem_tbl_5906[] = {
13412 { 0x00000200, 0x00008},
13413 { 0x00004000, 0x00400},
13414 { 0x00006000, 0x00400},
13415 { 0x00008000, 0x01000},
13416 { 0x00010000, 0x01000},
13417 { 0xffffffff, 0x00000}
13418 }, mem_tbl_5717[] = {
13419 { 0x00000200, 0x00008},
13420 { 0x00010000, 0x0a000},
13421 { 0x00020000, 0x13c00},
13422 { 0xffffffff, 0x00000}
13423 }, mem_tbl_57765[] = {
13424 { 0x00000200, 0x00008},
13425 { 0x00004000, 0x00800},
13426 { 0x00006000, 0x09800},
13427 { 0x00010000, 0x0a000},
13428 { 0xffffffff, 0x00000}
13430 struct mem_entry *mem_tbl;
/* Pick the region table matching the chip generation; order matters,
 * newest families are matched first.
 */
13434 if (tg3_flag(tp, 5717_PLUS))
13435 mem_tbl = mem_tbl_5717;
13436 else if (tg3_flag(tp, 57765_CLASS) ||
13437 tg3_asic_rev(tp) == ASIC_REV_5762)
13438 mem_tbl = mem_tbl_57765;
13439 else if (tg3_flag(tp, 5755_PLUS))
13440 mem_tbl = mem_tbl_5755;
13441 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13442 mem_tbl = mem_tbl_5906;
13443 else if (tg3_flag(tp, 5705_PLUS))
13444 mem_tbl = mem_tbl_5705;
13446 mem_tbl = mem_tbl_570x;
/* Walk table until the 0xffffffff sentinel offset. */
13448 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13449 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
/* Constants and canned packet header used by the TSO loopback test
 * (tg3_run_loopback() with tso_loopback == true).
 * tg3_tso_header is a pre-built IPv4 + TCP header template (20-byte IP
 * header, 20-byte TCP header, 12 bytes of TCP options) that gets copied
 * into the test frame after the Ethernet addresses.
 * NOTE(review): this extract omits some original lines (e.g. the array's
 * closing brace).
 */
13457 #define TG3_TSO_MSS 500
13459 #define TG3_TSO_IP_HDR_LEN 20
13460 #define TG3_TSO_TCP_HDR_LEN 20
13461 #define TG3_TSO_TCP_OPT_LEN 12
13463 static const u8 tg3_tso_header[] = {
13465 0x45, 0x00, 0x00, 0x00,
13466 0x00, 0x00, 0x40, 0x00,
13467 0x40, 0x06, 0x00, 0x00,
13468 0x0a, 0x00, 0x00, 0x01,
13469 0x0a, 0x00, 0x00, 0x02,
13470 0x0d, 0x00, 0xe0, 0x00,
13471 0x00, 0x00, 0x01, 0x00,
13472 0x00, 0x00, 0x02, 0x00,
13473 0x80, 0x10, 0x10, 0x00,
13474 0x14, 0x09, 0x00, 0x00,
13475 0x01, 0x01, 0x08, 0x0a,
13476 0x11, 0x11, 0x11, 0x11,
13477 0x11, 0x11, 0x11, 0x11,
/* tg3_run_loopback() - transmit one test frame and verify it loops back.
 * Builds a pktsz-byte frame (optionally a TSO mega-frame when
 * tso_loopback), queues it on the TX ring, forces a coalescing interrupt,
 * polls the status block until the TX consumer and RX producer indices
 * advance, then walks the RX return ring validating error bits, lengths,
 * checksum (TSO case) and the payload byte pattern.
 * Returns 0 on success, nonzero on failure (error paths are in lines this
 * extract omits).
 * NOTE(review): this extract omits some original lines (tx_len setup,
 * several braces/returns, udelay calls), so only visible logic is
 * documented.
 */
13480 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13482 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13483 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13485 struct sk_buff *skb;
13486 u8 *tx_data, *rx_data;
13488 int num_pkts, tx_len, rx_len, i, err;
13489 struct tg3_rx_buffer_desc *desc;
13490 struct tg3_napi *tnapi, *rnapi;
13491 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* Default both TX and RX to queue 0; with multiple IRQ vectors, RSS
 * routes RX and TSS routes TX through napi[1] instead.
 */
13493 tnapi = &tp->napi[0];
13494 rnapi = &tp->napi[0];
13495 if (tp->irq_cnt > 1) {
13496 if (tg3_flag(tp, ENABLE_RSS))
13497 rnapi = &tp->napi[1];
13498 if (tg3_flag(tp, ENABLE_TSS))
13499 tnapi = &tp->napi[1];
13501 coal_now = tnapi->coal_now | rnapi->coal_now;
13506 skb = netdev_alloc_skb(tp->dev, tx_len);
/* Destination MAC = our own address so the looped frame is accepted. */
13510 tx_data = skb_put(skb, tx_len);
13511 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13512 memset(tx_data + ETH_ALEN, 0x0, 8);
13514 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13516 if (tso_loopback) {
13517 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13519 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13520 TG3_TSO_TCP_OPT_LEN;
/* Drop in the canned IP/TCP header template after the MAC addrs. */
13522 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13523 sizeof(tg3_tso_header));
/* Expected number of segments the hardware should emit. */
13526 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13527 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13529 /* Set the total length field in the IP header */
13530 iph->tot_len = htons((u16)(mss + hdr_len));
13532 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13533 TXD_FLAG_CPU_POST_DMA);
13535 if (tg3_flag(tp, HW_TSO_1) ||
13536 tg3_flag(tp, HW_TSO_2) ||
13537 tg3_flag(tp, HW_TSO_3)) {
13539 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13540 th = (struct tcphdr *)&tx_data[val];
13543 base_flags |= TXD_FLAG_TCPUDP_CSUM;
/* Each HW TSO generation encodes the header length into the
 * mss/base_flags descriptor fields differently.
 */
13545 if (tg3_flag(tp, HW_TSO_3)) {
13546 mss |= (hdr_len & 0xc) << 12;
13547 if (hdr_len & 0x10)
13548 base_flags |= 0x00000010;
13549 base_flags |= (hdr_len & 0x3e0) << 5;
13550 } else if (tg3_flag(tp, HW_TSO_2))
13551 mss |= hdr_len << 9;
13552 else if (tg3_flag(tp, HW_TSO_1) ||
13553 tg3_asic_rev(tp) == ASIC_REV_5705) {
13554 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13556 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13559 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13562 data_off = ETH_HLEN;
13564 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13565 tx_len > VLAN_ETH_FRAME_LEN)
13566 base_flags |= TXD_FLAG_JMB_PKT;
/* Fill the payload with a predictable byte ramp for RX verification. */
13569 for (i = data_off; i < tx_len; i++)
13570 tx_data[i] = (u8) (i & 0xff);
13572 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13573 if (dma_mapping_error(&tp->pdev->dev, map)) {
13574 dev_kfree_skb(skb);
13578 val = tnapi->tx_prod;
13579 tnapi->tx_buffers[val].skb = skb;
13580 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
/* Kick host coalescing so the status block gets updated promptly. */
13582 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13587 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13589 budget = tg3_tx_avail(tnapi);
13590 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13591 base_flags | TXD_FLAG_END, mss, 0)) {
13592 tnapi->tx_buffers[val].skb = NULL;
13593 dev_kfree_skb(skb);
13599 /* Sync BD data before updating mailbox */
13602 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13603 tr32_mailbox(tnapi->prodmbox);
13607 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13608 for (i = 0; i < 35; i++) {
13609 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* Done once TX consumer caught up and RX saw all expected packets. */
13614 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13615 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13616 if ((tx_idx == tnapi->tx_prod) &&
13617 (rx_idx == (rx_start_idx + num_pkts)))
13621 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13622 dev_kfree_skb(skb);
13624 if (tx_idx != tnapi->tx_prod)
13627 if (rx_idx != rx_start_idx + num_pkts)
/* Validate every received descriptor/buffer. */
13631 while (rx_idx != rx_start_idx) {
13632 desc = &rnapi->rx_rcb[rx_start_idx++];
13633 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13634 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13636 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13637 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13640 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13643 if (!tso_loopback) {
13644 if (rx_len != tx_len)
/* Non-TSO: frame must land on the ring matching its size. */
13647 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13648 if (opaque_key != RXD_OPAQUE_RING_STD)
13651 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
/* TSO: hardware-computed TCP/UDP checksum must be good (0xffff). */
13654 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13655 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13656 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13660 if (opaque_key == RXD_OPAQUE_RING_STD) {
13661 rx_data = tpr->rx_std_buffers[desc_idx].data;
13662 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13664 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13665 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13666 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13671 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
/* Check the byte-ramp payload survived the round trip. */
13674 rx_data += TG3_RX_OFFSET(tp);
13675 for (i = data_off; i < rx_len; i++, val++) {
13676 if (*(rx_data + i) != (u8) (val & 0xff))
13683 /* tg3_free_rings will unmap and free the rx_data */
/* Per-mode loopback failure bits OR'd into the ethtool self-test results
 * by tg3_test_loopback(): standard-size frame, jumbo frame, and TSO.
 */
13688 #define TG3_STD_LOOPBACK_FAILED 1
13689 #define TG3_JMB_LOOPBACK_FAILED 2
13690 #define TG3_TSO_LOOPBACK_FAILED 4
13691 #define TG3_LOOPBACK_FAILED \
13692 (TG3_STD_LOOPBACK_FAILED | \
13693 TG3_JMB_LOOPBACK_FAILED | \
13694 TG3_TSO_LOOPBACK_FAILED)
/* tg3_test_loopback() - run MAC, PHY-internal and (optionally) external
 * loopback tests, recording failure bits into data[TG3_MAC_LOOPB_TEST],
 * data[TG3_PHY_LOOPB_TEST] and data[TG3_EXT_LOOPB_TEST].
 * EEE is temporarily masked off for the duration of the test and restored
 * at the end.  Returns -EIO if any loopback mode reported a failure.
 * NOTE(review): this extract omits some original lines (tw32 of the RSS
 * indirection table, mdelay calls, several braces/gotos).
 */
13696 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13700 u32 jmb_pkt_sz = 9000;
13703 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
/* Remember and clear EEE capability; restored before returning. */
13705 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13706 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
/* Interface down: mark every loopback mode failed and bail out. */
13708 if (!netif_running(tp->dev)) {
13709 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13710 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13712 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13716 err = tg3_reset_hw(tp, true);
13718 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13719 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13721 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13725 if (tg3_flag(tp, ENABLE_RSS)) {
13728 /* Reroute all rx packets to the 1st queue */
13729 for (i = MAC_RSS_INDIR_TBL_0;
13730 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13734 /* HW errata - mac loopback fails in some cases on 5780.
13735 * Normal traffic and PHY loopback are not affected by
13736 * errata. Also, the MAC loopback test is deprecated for
13737 * all newer ASIC revisions.
13739 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13740 !tg3_flag(tp, CPMU_PRESENT)) {
13741 tg3_mac_loopback(tp, true);
13743 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13744 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13746 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13747 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13748 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13750 tg3_mac_loopback(tp, false);
/* Internal PHY loopback: only for copper PHYs not driven by phylib. */
13753 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13754 !tg3_flag(tp, USE_PHYLIB)) {
13757 tg3_phy_lpbk_set(tp, 0, false);
13759 /* Wait for link */
13760 for (i = 0; i < 100; i++) {
13761 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13766 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13767 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13768 if (tg3_flag(tp, TSO_CAPABLE) &&
13769 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13770 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13771 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13772 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13773 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
/* External loopback (requires a loopback plug), if requested. */
13776 tg3_phy_lpbk_set(tp, 0, true);
13778 /* All link indications report up, but the hardware
13779 * isn't really ready for about 20 msec. Double it
13784 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13785 data[TG3_EXT_LOOPB_TEST] |=
13786 TG3_STD_LOOPBACK_FAILED;
13787 if (tg3_flag(tp, TSO_CAPABLE) &&
13788 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13789 data[TG3_EXT_LOOPB_TEST] |=
13790 TG3_TSO_LOOPBACK_FAILED;
13791 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13792 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13793 data[TG3_EXT_LOOPB_TEST] |=
13794 TG3_JMB_LOOPBACK_FAILED;
13797 /* Re-enable gphy autopowerdown. */
13798 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13799 tg3_phy_toggle_apd(tp, true);
13802 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13803 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
/* Restore the EEE capability flag saved at entry. */
13806 tp->phy_flags |= eee_cap;
/* tg3_self_test() - ethtool .self_test entry point.
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests,
 * setting ETH_TEST_FL_FAILED and the corresponding data[] slot on each
 * failure.  Offline tests halt the chip, run with the full lock held,
 * and restart the hardware afterwards.
 * NOTE(review): this extract omits some original lines (returns, irq_sync
 * assignment, phy reset on serdes, unlock/lock pairs around the interrupt
 * test).
 */
13811 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13814 struct tg3 *tp = netdev_priv(dev);
13815 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
/* Device in low-power state: try to power it up first; on failure mark
 * every test failed.
 */
13817 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13818 if (tg3_power_up(tp)) {
13819 etest->flags |= ETH_TEST_FL_FAILED;
13820 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13823 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13826 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13828 if (tg3_test_nvram(tp) != 0) {
13829 etest->flags |= ETH_TEST_FL_FAILED;
13830 data[TG3_NVRAM_TEST] = 1;
/* Link test is skipped when external loopback was requested (the plug
 * replaces the normal link partner).
 */
13832 if (!doextlpbk && tg3_test_link(tp)) {
13833 etest->flags |= ETH_TEST_FL_FAILED;
13834 data[TG3_LINK_TEST] = 1;
13836 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13837 int err, err2 = 0, irq_sync = 0;
13839 if (netif_running(dev)) {
13841 tg3_netif_stop(tp);
13845 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip before the destructive register/memory tests. */
13846 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13847 err = tg3_nvram_lock(tp);
13848 tg3_halt_cpu(tp, RX_CPU_BASE);
13849 if (!tg3_flag(tp, 5705_PLUS))
13850 tg3_halt_cpu(tp, TX_CPU_BASE);
13852 tg3_nvram_unlock(tp);
13854 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13857 if (tg3_test_registers(tp) != 0) {
13858 etest->flags |= ETH_TEST_FL_FAILED;
13859 data[TG3_REGISTER_TEST] = 1;
13862 if (tg3_test_memory(tp) != 0) {
13863 etest->flags |= ETH_TEST_FL_FAILED;
13864 data[TG3_MEMORY_TEST] = 1;
13868 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13870 if (tg3_test_loopback(tp, data, doextlpbk))
13871 etest->flags |= ETH_TEST_FL_FAILED;
13873 tg3_full_unlock(tp);
13875 if (tg3_test_interrupt(tp) != 0) {
13876 etest->flags |= ETH_TEST_FL_FAILED;
13877 data[TG3_INTERRUPT_TEST] = 1;
13880 tg3_full_lock(tp, 0);
/* Shut down and, if the interface was up, bring the HW back. */
13882 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13883 if (netif_running(dev)) {
13884 tg3_flag_set(tp, INIT_COMPLETE);
13885 err2 = tg3_restart_hw(tp, true);
13887 tg3_netif_start(tp);
13890 tg3_full_unlock(tp);
13892 if (irq_sync && !err2)
13895 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13896 tg3_power_down_prepare(tp);
/* tg3_hwtstamp_set() - SIOCSHWTSTAMP ioctl handler.
 * Copies a struct hwtstamp_config from userspace, validates tx_type,
 * translates the requested rx_filter into the TG3_RX_PTP_CTL_* register
 * value kept in tp->rxptpctl, programs the register if the interface is
 * up, toggles the TX_TSTAMP_EN flag, and copies the config back to
 * userspace.
 * NOTE(review): this extract omits some original lines (case `break`s,
 * the default: -ERANGE case, and -EFAULT returns).
 */
14045 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
14047 struct tg3 *tp = netdev_priv(dev);
14048 struct hwtstamp_config stmpconf;
14050 if (!tg3_flag(tp, PTP_CAPABLE))
14051 return -EOPNOTSUPP;
14053 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
14056 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
14057 stmpconf.tx_type != HWTSTAMP_TX_OFF)
/* Map each supported rx_filter to a PTP-version enable plus an event
 * selector (all-events / sync / delay-req).
 */
14060 switch (stmpconf.rx_filter) {
14061 case HWTSTAMP_FILTER_NONE:
14064 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
14065 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
14066 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
14068 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
14069 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
14070 TG3_RX_PTP_CTL_SYNC_EVNT;
14072 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
14073 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
14074 TG3_RX_PTP_CTL_DELAY_REQ;
14076 case HWTSTAMP_FILTER_PTP_V2_EVENT:
14077 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
14078 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
14080 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
14081 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14082 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
14084 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
14085 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14086 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
14088 case HWTSTAMP_FILTER_PTP_V2_SYNC:
14089 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
14090 TG3_RX_PTP_CTL_SYNC_EVNT;
14092 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
14093 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14094 TG3_RX_PTP_CTL_SYNC_EVNT;
14096 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14097 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14098 TG3_RX_PTP_CTL_SYNC_EVNT;
14100 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14101 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
14102 TG3_RX_PTP_CTL_DELAY_REQ;
14104 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14105 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14106 TG3_RX_PTP_CTL_DELAY_REQ;
14108 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14109 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14110 TG3_RX_PTP_CTL_DELAY_REQ;
/* Program the RX PTP control register only while the device is up. */
14116 if (netif_running(dev) && tp->rxptpctl)
14117 tw32(TG3_RX_PTP_CTL,
14118 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14120 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14121 tg3_flag_set(tp, TX_TSTAMP_EN);
14123 tg3_flag_clear(tp, TX_TSTAMP_EN);
14125 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* tg3_hwtstamp_get() - SIOCGHWTSTAMP ioctl handler.
 * Reconstructs a struct hwtstamp_config from the current TX_TSTAMP_EN
 * flag and tp->rxptpctl register value (inverse mapping of
 * tg3_hwtstamp_set()) and copies it to userspace.
 * NOTE(review): this extract omits some original lines (case `break`s,
 * the default: -ERANGE case).
 */
14128 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14130 struct tg3 *tp = netdev_priv(dev);
14131 struct hwtstamp_config stmpconf;
14133 if (!tg3_flag(tp, PTP_CAPABLE))
14134 return -EOPNOTSUPP;
14136 stmpconf.flags = 0;
14137 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14138 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
/* Translate the programmed PTP control bits back to an rx_filter. */
14140 switch (tp->rxptpctl) {
14142 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14144 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14145 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14147 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14148 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14150 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14151 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14153 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14154 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14156 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14157 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14159 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14160 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14162 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14163 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14165 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14166 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14168 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14169 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14171 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14172 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14174 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14175 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14177 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14178 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14185 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* tg3_ioctl() - .ndo_eth_ioctl handler.
 * Delegates to phylib when USE_PHYLIB is set; otherwise implements the
 * MII register read/write ioctls under tp->lock and forwards the
 * hardware-timestamping ioctls to tg3_hwtstamp_set()/get().
 * Falls through to -EOPNOTSUPP for anything unhandled.
 * NOTE(review): this extract omits some original lines (the case labels
 * for SIOCGMIIPHY/SIOCSMIIREG, some returns and `break`s).
 */
14045 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14047 struct mii_ioctl_data *data = if_mii(ifr);
14048 struct tg3 *tp = netdev_priv(dev);
/* phylib-managed PHY: hand the whole ioctl to the PHY layer. */
14051 if (tg3_flag(tp, USE_PHYLIB)) {
14052 struct phy_device *phydev;
14053 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14055 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14056 return phy_mii_ioctl(phydev, ifr, cmd);
14061 data->phy_id = tp->phy_addr;
14064 case SIOCGMIIREG: {
14067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14068 break; /* We have no PHY */
14070 if (!netif_running(dev))
/* MII accesses are serialized by tp->lock. */
14073 spin_lock_bh(&tp->lock);
14074 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14075 data->reg_num & 0x1f, &mii_regval);
14076 spin_unlock_bh(&tp->lock);
14078 data->val_out = mii_regval;
14084 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14085 break; /* We have no PHY */
14087 if (!netif_running(dev))
14090 spin_lock_bh(&tp->lock);
14091 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14092 data->reg_num & 0x1f, data->val_in);
14093 spin_unlock_bh(&tp->lock);
14097 case SIOCSHWTSTAMP:
14098 return tg3_hwtstamp_set(dev, ifr);
14100 case SIOCGHWTSTAMP:
14101 return tg3_hwtstamp_get(dev, ifr);
14107 return -EOPNOTSUPP;
/* tg3_get_coalesce() - ethtool .get_coalesce: report the cached
 * coalescing parameters by copying tp->coal into *ec.
 */
14110 static int tg3_get_coalesce(struct net_device *dev,
14111 struct ethtool_coalesce *ec,
14112 struct kernel_ethtool_coalesce *kernel_coal,
14113 struct netlink_ext_ack *extack)
14115 struct tg3 *tp = netdev_priv(dev);
14117 memcpy(ec, &tp->coal, sizeof(*ec));
/* tg3_set_coalesce() - ethtool .set_coalesce handler.
 * Range-checks the requested coalescing parameters (the *_irq and
 * stats-block limits are forced to 0 on 5705+ chips, effectively
 * rejecting nonzero values there), caches them in tp->coal, and writes
 * them to the hardware when the interface is running.
 * NOTE(review): this extract omits some original lines (the -EINVAL and
 * final return statements, closing braces).
 */
14121 static int tg3_set_coalesce(struct net_device *dev,
14122 struct ethtool_coalesce *ec,
14123 struct kernel_ethtool_coalesce *kernel_coal,
14124 struct netlink_ext_ack *extack)
14126 struct tg3 *tp = netdev_priv(dev);
14127 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14128 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* Pre-5705 chips support the per-IRQ and stats-block knobs. */
14130 if (!tg3_flag(tp, 5705_PLUS)) {
14131 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14132 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14133 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14134 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Validate every parameter against its hardware limit. */
14137 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14138 (!ec->rx_coalesce_usecs) ||
14139 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14140 (!ec->tx_coalesce_usecs) ||
14141 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14142 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14143 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14144 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14145 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14146 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14147 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14148 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14151 /* Only copy relevant parameters, ignore all others. */
14152 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14153 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14154 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14155 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14156 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14157 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14158 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14159 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14160 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Apply immediately if the interface is up. */
14162 if (netif_running(dev)) {
14163 tg3_full_lock(tp, 0);
14164 __tg3_set_coalesce(tp, &tp->coal);
14165 tg3_full_unlock(tp);
/* tg3_set_eee() - ethtool .set_eee handler.
 * Rejects the request if the board lacks EEE support, if the caller
 * tries to change the advertisement mask, or if tx_lpi_timer exceeds
 * the hardware maximum; otherwise marks the PHY user-configured and,
 * when running, applies the new EEE settings under the full lock.
 * NOTE(review): this extract omits some original lines (the -EINVAL
 * returns, copying edata into tp->eee, the phy-reset call, final return).
 */
14170 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14172 struct tg3 *tp = netdev_priv(dev);
14174 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14175 netdev_warn(tp->dev, "Board does not support EEE!\n");
14176 return -EOPNOTSUPP;
/* The advertised mask is fixed by the driver; only other fields may
 * be changed through this interface.
 */
14179 if (edata->advertised != tp->eee.advertised) {
14180 netdev_warn(tp->dev,
14181 "Direct manipulation of EEE advertisement is not supported\n");
14185 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14186 netdev_warn(tp->dev,
14187 "Maximal Tx Lpi timer supported is %#x(u)\n",
14188 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14194 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14195 tg3_warn_mgmt_link_flap(tp);
14197 if (netif_running(tp->dev)) {
14198 tg3_full_lock(tp, 0);
14201 tg3_full_unlock(tp);
/* tg3_get_eee() - ethtool .get_eee handler.
 * Fails with -EOPNOTSUPP on boards without EEE support; the copy of
 * tp->eee into *edata is on a line this extract omits.
 */
14207 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14209 struct tg3 *tp = netdev_priv(dev);
14211 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14212 netdev_warn(tp->dev,
14213 "Board does not support EEE!\n");
14214 return -EOPNOTSUPP;
/* ethtool operations table for tg3 devices.
 * supported_coalesce_params declares which struct ethtool_coalesce
 * fields tg3_set_coalesce() honours; the core rejects all others.
 */
14221 static const struct ethtool_ops tg3_ethtool_ops = {
14222 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14223 ETHTOOL_COALESCE_MAX_FRAMES |
14224 ETHTOOL_COALESCE_USECS_IRQ |
14225 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14226 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14227 .get_drvinfo = tg3_get_drvinfo,
14228 .get_regs_len = tg3_get_regs_len,
14229 .get_regs = tg3_get_regs,
14230 .get_wol = tg3_get_wol,
14231 .set_wol = tg3_set_wol,
14232 .get_msglevel = tg3_get_msglevel,
14233 .set_msglevel = tg3_set_msglevel,
14234 .nway_reset = tg3_nway_reset,
14235 .get_link = ethtool_op_get_link,
14236 .get_eeprom_len = tg3_get_eeprom_len,
14237 .get_eeprom = tg3_get_eeprom,
14238 .set_eeprom = tg3_set_eeprom,
14239 .get_ringparam = tg3_get_ringparam,
14240 .set_ringparam = tg3_set_ringparam,
14241 .get_pauseparam = tg3_get_pauseparam,
14242 .set_pauseparam = tg3_set_pauseparam,
14243 .self_test = tg3_self_test,
14244 .get_strings = tg3_get_strings,
14245 .set_phys_id = tg3_set_phys_id,
14246 .get_ethtool_stats = tg3_get_ethtool_stats,
14247 .get_coalesce = tg3_get_coalesce,
14248 .set_coalesce = tg3_set_coalesce,
14249 .get_sset_count = tg3_get_sset_count,
14250 .get_rxnfc = tg3_get_rxnfc,
14251 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14252 .get_rxfh = tg3_get_rxfh,
14253 .set_rxfh = tg3_set_rxfh,
14254 .get_channels = tg3_get_channels,
14255 .set_channels = tg3_set_channels,
14256 .get_ts_info = tg3_get_ts_info,
14257 .get_eee = tg3_get_eee,
14258 .set_eee = tg3_set_eee,
14259 .get_link_ksettings = tg3_get_link_ksettings,
14260 .set_link_ksettings = tg3_set_link_ksettings,
/* tg3_get_stats64() - .ndo_get_stats64 handler.
 * Under tp->lock: if hardware stats are unavailable (no stats block or
 * init not complete), return the last snapshot (tp->net_stats_prev);
 * otherwise fill *stats from live hardware counters via tg3_get_nstats().
 */
14263 static void tg3_get_stats64(struct net_device *dev,
14264 struct rtnl_link_stats64 *stats)
14266 struct tg3 *tp = netdev_priv(dev);
14268 spin_lock_bh(&tp->lock);
14269 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14270 *stats = tp->net_stats_prev;
14271 spin_unlock_bh(&tp->lock);
14275 tg3_get_nstats(tp, stats);
14276 spin_unlock_bh(&tp->lock);
/* tg3_set_rx_mode() - .ndo_set_rx_mode handler.
 * No-op while the interface is down; otherwise applies the RX filter
 * configuration under the full lock via __tg3_set_rx_mode().
 */
14279 static void tg3_set_rx_mode(struct net_device *dev)
14281 struct tg3 *tp = netdev_priv(dev);
14283 if (!netif_running(dev))
14286 tg3_full_lock(tp, 0);
14287 __tg3_set_rx_mode(dev);
14288 tg3_full_unlock(tp);
/* tg3_set_mtu() - record a new MTU and adjust jumbo/TSO bookkeeping.
 * MTUs above ETH_DATA_LEN enable the jumbo RX ring; on 5780-class chips
 * jumbo frames and TSO are mutually exclusive, so TSO_CAPABLE is cleared
 * (and re-set when dropping back to a standard MTU) with the netdev
 * feature set refreshed around the flag change.
 */
14291 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14294 dev->mtu = new_mtu;
14296 if (new_mtu > ETH_DATA_LEN) {
14297 if (tg3_flag(tp, 5780_CLASS)) {
14298 netdev_update_features(dev);
14299 tg3_flag_clear(tp, TSO_CAPABLE);
14301 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14304 if (tg3_flag(tp, 5780_CLASS)) {
14305 tg3_flag_set(tp, TSO_CAPABLE);
14306 netdev_update_features(dev);
14308 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* tg3_change_mtu() - .ndo_change_mtu handler.
 * If the interface is down only the bookkeeping (tg3_set_mtu) is updated;
 * otherwise the chip is stopped, halted, and restarted with the new MTU,
 * resetting the PHY on the ASIC revisions listed below.
 * NOTE(review): this extract omits some original lines (tg3_phy_start,
 * `reset_phy = true;`, error handling and final return).
 */
14312 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14314 struct tg3 *tp = netdev_priv(dev);
14316 bool reset_phy = false;
14318 if (!netif_running(dev)) {
14319 /* We'll just catch it later when the
14322 tg3_set_mtu(dev, tp, new_mtu);
14328 tg3_netif_stop(tp);
14330 tg3_set_mtu(dev, tp, new_mtu);
14332 tg3_full_lock(tp, 1);
14334 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14336 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14337 * breaks all requests to 256 bytes.
14339 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14340 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14342 tg3_asic_rev(tp) == ASIC_REV_5720)
14345 err = tg3_restart_hw(tp, reset_phy);
14348 tg3_netif_start(tp);
14350 tg3_full_unlock(tp);
/* Netdevice operations table wiring the tg3_* handlers into the
 * net core (open/close, xmit, stats, rx-mode, MAC address, ioctl,
 * timeout, MTU and feature management, plus netpoll support).
 */
14358 static const struct net_device_ops tg3_netdev_ops = {
14359 .ndo_open = tg3_open,
14360 .ndo_stop = tg3_close,
14361 .ndo_start_xmit = tg3_start_xmit,
14362 .ndo_get_stats64 = tg3_get_stats64,
14363 .ndo_validate_addr = eth_validate_addr,
14364 .ndo_set_rx_mode = tg3_set_rx_mode,
14365 .ndo_set_mac_address = tg3_set_mac_addr,
14366 .ndo_eth_ioctl = tg3_ioctl,
14367 .ndo_tx_timeout = tg3_tx_timeout,
14368 .ndo_change_mtu = tg3_change_mtu,
14369 .ndo_fix_features = tg3_fix_features,
14370 .ndo_set_features = tg3_set_features,
14371 #ifdef CONFIG_NET_POLL_CONTROLLER
14372 .ndo_poll_controller = tg3_poll_controller,
/* tg3_get_eeprom_size() - size a legacy EEPROM by address wrap-around.
 * Starting from the default EEPROM_CHIP_SIZE, reads at growing offsets;
 * when the magic signature reappears the addressing has wrapped, giving
 * the device size, which is stored in tp->nvram_size.
 * NOTE(review): this extract omits some original lines (the cursize
 * initialization, the wrap/doubling logic inside the loop, returns).
 */
14376 static void tg3_get_eeprom_size(struct tg3 *tp)
14378 u32 cursize, val, magic;
14380 tp->nvram_size = EEPROM_CHIP_SIZE;
14382 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Only proceed for the known EEPROM magic signatures. */
14385 if ((magic != TG3_EEPROM_MAGIC) &&
14386 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14387 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14391 * Size the chip by reading offsets at increasing powers of two.
14392 * When we encounter our validation signature, we know the addressing
14393 * has wrapped around, and thus have our chip size.
14397 while (cursize < tp->nvram_size) {
14398 if (tg3_nvram_read(tp, cursize, &val) != 0)
14407 tp->nvram_size = cursize;
/* tg3_get_nvram_size() - determine the NVRAM device size.
 * Self-boot images (magic != TG3_EEPROM_MAGIC) are sized by
 * tg3_get_eeprom_size(); otherwise the size in KB is read from the
 * 16-bit field at offset 0xf2 (see the byte-order note below), falling
 * back to a 512KB default.
 * NOTE(review): this extract omits some original lines (returns, the
 * `if (val != 0)` guard around the 0xf2 field, closing braces).
 */
14410 static void tg3_get_nvram_size(struct tg3 *tp)
14414 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14417 /* Selfboot format */
14418 if (val != TG3_EEPROM_MAGIC) {
14419 tg3_get_eeprom_size(tp);
14423 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14425 /* This is confusing. We want to operate on the
14426 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14427 * call will read from NVRAM and byteswap the data
14428 * according to the byteswapping settings for all
14429 * other register accesses. This ensures the data we
14430 * want will always reside in the lower 16-bits.
14431 * However, the data in NVRAM is in LE format, which
14432 * means the data from the NVRAM read will always be
14433 * opposite the endianness of the CPU. The 16-bit
14434 * byteswap then brings the data to CPU endianness.
14436 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14440 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* tg3_get_nvram_info() - probe NVRAM type from the NVRAM_CFG1 register.
 * When a flash interface is present, sets the FLASH flag and clears
 * compat-bypass; on 5750/5780-class parts it decodes the vendor field
 * into JEDEC id, page size and the NVRAM_BUFFERED flag, defaulting to a
 * buffered Atmel AT45DB0X1B otherwise.
 * NOTE(review): this extract omits some original lines (case `break`s,
 * closing braces, the protected-NVRAM handling on some paths).
 */
14443 static void tg3_get_nvram_info(struct tg3 *tp)
14447 nvcfg1 = tr32(NVRAM_CFG1);
14448 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14449 tg3_flag_set(tp, FLASH);
/* No flash interface: force direct (non-bypass) NVRAM access. */
14451 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14452 tw32(NVRAM_CFG1, nvcfg1);
14455 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14456 tg3_flag(tp, 5780_CLASS)) {
14457 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14458 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14459 tp->nvram_jedecnum = JEDEC_ATMEL;
14460 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14461 tg3_flag_set(tp, NVRAM_BUFFERED);
14463 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14464 tp->nvram_jedecnum = JEDEC_ATMEL;
14465 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14467 case FLASH_VENDOR_ATMEL_EEPROM:
14468 tp->nvram_jedecnum = JEDEC_ATMEL;
14469 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14470 tg3_flag_set(tp, NVRAM_BUFFERED);
14472 case FLASH_VENDOR_ST:
14473 tp->nvram_jedecnum = JEDEC_ST;
14474 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14475 tg3_flag_set(tp, NVRAM_BUFFERED);
14477 case FLASH_VENDOR_SAIFUN:
14478 tp->nvram_jedecnum = JEDEC_SAIFUN;
14479 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14481 case FLASH_VENDOR_SST_SMALL:
14482 case FLASH_VENDOR_SST_LARGE:
14483 tp->nvram_jedecnum = JEDEC_SST;
14484 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Non-5750/5780 default: buffered Atmel AT45DB0X1B. */
14488 tp->nvram_jedecnum = JEDEC_ATMEL;
14489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14490 tg3_flag_set(tp, NVRAM_BUFFERED);
/* Decode the 5752-style page-size strap field from @nvmcfg1 into
 * tp->nvram_pagesize (bytes).  264/528 are Atmel DataFlash "power of
 * two plus extra" page sizes; the rest are conventional powers of two.
 *
 * NOTE(review): break statements appear elided in this excerpt.
 */
14494 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14496 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14497 case FLASH_5752PAGE_SIZE_256:
14498 tp->nvram_pagesize = 256;
14500 case FLASH_5752PAGE_SIZE_512:
14501 tp->nvram_pagesize = 512;
14503 case FLASH_5752PAGE_SIZE_1K:
14504 tp->nvram_pagesize = 1024;
14506 case FLASH_5752PAGE_SIZE_2K:
14507 tp->nvram_pagesize = 2048;
14509 case FLASH_5752PAGE_SIZE_4K:
14510 tp->nvram_pagesize = 4096;
14512 case FLASH_5752PAGE_SIZE_264:
14513 tp->nvram_pagesize = 264;
14515 case FLASH_5752PAGE_SIZE_528:
14516 tp->nvram_pagesize = 528;
/* NVRAM probe for the 5752: decodes the 5752 vendor strap from
 * NVRAM_CFG1, sets jedecnum / buffering / flash flags, then either
 * derives the flash page size or (for EEPROM parts) uses the maximum
 * EEPROM chip size as the "page" and disables compatibility bypass.
 * Bit 27 marks TPM-protected NVRAM.
 */
14521 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14525 nvcfg1 = tr32(NVRAM_CFG1);
14527 /* NVRAM protection for TPM */
14528 if (nvcfg1 & (1 << 27))
14529 tg3_flag_set(tp, PROTECTED_NVRAM);
14531 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14532 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14533 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14534 tp->nvram_jedecnum = JEDEC_ATMEL;
14535 tg3_flag_set(tp, NVRAM_BUFFERED);
14537 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538 tp->nvram_jedecnum = JEDEC_ATMEL;
14539 tg3_flag_set(tp, NVRAM_BUFFERED);
14540 tg3_flag_set(tp, FLASH);
14542 case FLASH_5752VENDOR_ST_M45PE10:
14543 case FLASH_5752VENDOR_ST_M45PE20:
14544 case FLASH_5752VENDOR_ST_M45PE40:
14545 tp->nvram_jedecnum = JEDEC_ST;
14546 tg3_flag_set(tp, NVRAM_BUFFERED);
14547 tg3_flag_set(tp, FLASH);
14551 if (tg3_flag(tp, FLASH)) {
14552 tg3_nvram_get_pagesize(tp, nvcfg1);
14554 /* For eeprom, set pagesize to maximum eeprom size */
14555 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14557 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14558 tw32(NVRAM_CFG1, nvcfg1);
/* NVRAM probe for the 5755: identifies the flash part from the 5752
 * vendor strap and sets jedecnum, pagesize, and nvram_size.  When TPM
 * protection (bit 27) is active the usable size is reduced (the
 * 0x3e200 / 0x1f200 literals are the protected-region limits).
 */
14562 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14564 u32 nvcfg1, protect = 0;
14566 nvcfg1 = tr32(NVRAM_CFG1);
14568 /* NVRAM protection for TPM */
14569 if (nvcfg1 & (1 << 27)) {
14570 tg3_flag_set(tp, PROTECTED_NVRAM);
14574 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14576 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14577 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14578 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14579 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14580 tp->nvram_jedecnum = JEDEC_ATMEL;
14581 tg3_flag_set(tp, NVRAM_BUFFERED);
14582 tg3_flag_set(tp, FLASH);
14583 tp->nvram_pagesize = 264;
14584 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14585 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14586 tp->nvram_size = (protect ? 0x3e200 :
14587 TG3_NVRAM_SIZE_512KB);
14588 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14589 tp->nvram_size = (protect ? 0x1f200 :
14590 TG3_NVRAM_SIZE_256KB);
14592 tp->nvram_size = (protect ? 0x1f200 :
14593 TG3_NVRAM_SIZE_128KB);
14595 case FLASH_5752VENDOR_ST_M45PE10:
14596 case FLASH_5752VENDOR_ST_M45PE20:
14597 case FLASH_5752VENDOR_ST_M45PE40:
14598 tp->nvram_jedecnum = JEDEC_ST;
14599 tg3_flag_set(tp, NVRAM_BUFFERED);
14600 tg3_flag_set(tp, FLASH);
14601 tp->nvram_pagesize = 256;
14602 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14603 tp->nvram_size = (protect ?
14604 TG3_NVRAM_SIZE_64KB :
14605 TG3_NVRAM_SIZE_128KB);
14606 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14607 tp->nvram_size = (protect ?
14608 TG3_NVRAM_SIZE_64KB :
14609 TG3_NVRAM_SIZE_256KB);
14611 tp->nvram_size = (protect ?
14612 TG3_NVRAM_SIZE_128KB :
14613 TG3_NVRAM_SIZE_512KB);
/* NVRAM probe for 5787/5784/5785: decodes the vendor strap into
 * jedecnum / pagesize.  EEPROM variants disable compatibility bypass;
 * flash variants just record page size (264 for Atmel DataFlash,
 * 256 for ST M45PExx).
 */
14618 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14622 nvcfg1 = tr32(NVRAM_CFG1);
14624 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14625 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14626 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14627 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14628 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14629 tp->nvram_jedecnum = JEDEC_ATMEL;
14630 tg3_flag_set(tp, NVRAM_BUFFERED);
14631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14633 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14634 tw32(NVRAM_CFG1, nvcfg1);
14636 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14637 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14638 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14639 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14640 tp->nvram_jedecnum = JEDEC_ATMEL;
14641 tg3_flag_set(tp, NVRAM_BUFFERED);
14642 tg3_flag_set(tp, FLASH);
14643 tp->nvram_pagesize = 264;
14645 case FLASH_5752VENDOR_ST_M45PE10:
14646 case FLASH_5752VENDOR_ST_M45PE20:
14647 case FLASH_5752VENDOR_ST_M45PE40:
14648 tp->nvram_jedecnum = JEDEC_ST;
14649 tg3_flag_set(tp, NVRAM_BUFFERED);
14650 tg3_flag_set(tp, FLASH);
14651 tp->nvram_pagesize = 256;
/* NVRAM probe for the 5761: decodes vendor straps (Atmel ADB/MDB or
 * ST M45PExx families), then sizes the part.  If the NVRAM address
 * lockout register reports a size it is used directly; otherwise the
 * second switch maps the specific part number to 256KB..2MB.
 * Atmel parts additionally set NO_NVRAM_ADDR_TRANS.
 */
14656 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14658 u32 nvcfg1, protect = 0;
14660 nvcfg1 = tr32(NVRAM_CFG1);
14662 /* NVRAM protection for TPM */
14663 if (nvcfg1 & (1 << 27)) {
14664 tg3_flag_set(tp, PROTECTED_NVRAM);
14668 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14670 case FLASH_5761VENDOR_ATMEL_ADB021D:
14671 case FLASH_5761VENDOR_ATMEL_ADB041D:
14672 case FLASH_5761VENDOR_ATMEL_ADB081D:
14673 case FLASH_5761VENDOR_ATMEL_ADB161D:
14674 case FLASH_5761VENDOR_ATMEL_MDB021D:
14675 case FLASH_5761VENDOR_ATMEL_MDB041D:
14676 case FLASH_5761VENDOR_ATMEL_MDB081D:
14677 case FLASH_5761VENDOR_ATMEL_MDB161D:
14678 tp->nvram_jedecnum = JEDEC_ATMEL;
14679 tg3_flag_set(tp, NVRAM_BUFFERED);
14680 tg3_flag_set(tp, FLASH);
14681 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14682 tp->nvram_pagesize = 256;
14684 case FLASH_5761VENDOR_ST_A_M45PE20:
14685 case FLASH_5761VENDOR_ST_A_M45PE40:
14686 case FLASH_5761VENDOR_ST_A_M45PE80:
14687 case FLASH_5761VENDOR_ST_A_M45PE16:
14688 case FLASH_5761VENDOR_ST_M_M45PE20:
14689 case FLASH_5761VENDOR_ST_M_M45PE40:
14690 case FLASH_5761VENDOR_ST_M_M45PE80:
14691 case FLASH_5761VENDOR_ST_M_M45PE16:
14692 tp->nvram_jedecnum = JEDEC_ST;
14693 tg3_flag_set(tp, NVRAM_BUFFERED);
14694 tg3_flag_set(tp, FLASH);
14695 tp->nvram_pagesize = 256;
/* Prefer the size reported by the address-lockout register, if any. */
14700 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14703 case FLASH_5761VENDOR_ATMEL_ADB161D:
14704 case FLASH_5761VENDOR_ATMEL_MDB161D:
14705 case FLASH_5761VENDOR_ST_A_M45PE16:
14706 case FLASH_5761VENDOR_ST_M_M45PE16:
14707 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14709 case FLASH_5761VENDOR_ATMEL_ADB081D:
14710 case FLASH_5761VENDOR_ATMEL_MDB081D:
14711 case FLASH_5761VENDOR_ST_A_M45PE80:
14712 case FLASH_5761VENDOR_ST_M_M45PE80:
14713 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14715 case FLASH_5761VENDOR_ATMEL_ADB041D:
14716 case FLASH_5761VENDOR_ATMEL_MDB041D:
14717 case FLASH_5761VENDOR_ST_A_M45PE40:
14718 case FLASH_5761VENDOR_ST_M_M45PE40:
14719 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14721 case FLASH_5761VENDOR_ATMEL_ADB021D:
14722 case FLASH_5761VENDOR_ATMEL_MDB021D:
14723 case FLASH_5761VENDOR_ST_A_M45PE20:
14724 case FLASH_5761VENDOR_ST_M_M45PE20:
14725 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* NVRAM probe for the 5906: fixed configuration — buffered Atmel
 * EEPROM with the AT24C512 chip size used as the page size.  No
 * register reads needed.
 */
14731 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14733 tp->nvram_jedecnum = JEDEC_ATMEL;
14734 tg3_flag_set(tp, NVRAM_BUFFERED);
14735 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* NVRAM probe for 57780 / 57765-class: outer switch picks the vendor
 * family (EEPROM, Atmel AT45DB, or ST M45PE); nested switches map the
 * exact part to 128/256/512KB.  Unknown straps mean NO_NVRAM.  For
 * flash parts the page size is derived afterwards, and non-DataFlash
 * page sizes (not 264/528) use linear addressing (NO_NVRAM_ADDR_TRANS).
 */
14738 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14742 nvcfg1 = tr32(NVRAM_CFG1);
14744 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14746 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14747 tp->nvram_jedecnum = JEDEC_ATMEL;
14748 tg3_flag_set(tp, NVRAM_BUFFERED);
14749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14751 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14752 tw32(NVRAM_CFG1, nvcfg1);
14754 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14755 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14756 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14757 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14758 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14759 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14760 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14761 tp->nvram_jedecnum = JEDEC_ATMEL;
14762 tg3_flag_set(tp, NVRAM_BUFFERED);
14763 tg3_flag_set(tp, FLASH);
14765 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14766 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14767 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14768 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14769 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14771 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14772 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14773 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14775 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14776 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14777 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14781 case FLASH_5752VENDOR_ST_M45PE10:
14782 case FLASH_5752VENDOR_ST_M45PE20:
14783 case FLASH_5752VENDOR_ST_M45PE40:
14784 tp->nvram_jedecnum = JEDEC_ST;
14785 tg3_flag_set(tp, NVRAM_BUFFERED);
14786 tg3_flag_set(tp, FLASH);
14788 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14789 case FLASH_5752VENDOR_ST_M45PE10:
14790 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14792 case FLASH_5752VENDOR_ST_M45PE20:
14793 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14795 case FLASH_5752VENDOR_ST_M45PE40:
14796 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unrecognized vendor strap: no usable NVRAM. */
14801 tg3_flag_set(tp, NO_NVRAM);
14805 tg3_nvram_get_pagesize(tp, nvcfg1);
14806 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14807 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* NVRAM probe for 5717/5719: same structure as the 57780 probe —
 * outer switch on vendor family, nested switch for size.  Some parts
 * (MDB021D, M_M25PE20/M_M45PE20) intentionally leave nvram_size at 0
 * so the generic tg3_nvram_get_size() path sizes them later.
 */
14811 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14815 nvcfg1 = tr32(NVRAM_CFG1);
14817 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14818 case FLASH_5717VENDOR_ATMEL_EEPROM:
14819 case FLASH_5717VENDOR_MICRO_EEPROM:
14820 tp->nvram_jedecnum = JEDEC_ATMEL;
14821 tg3_flag_set(tp, NVRAM_BUFFERED);
14822 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14824 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14825 tw32(NVRAM_CFG1, nvcfg1);
14827 case FLASH_5717VENDOR_ATMEL_MDB011D:
14828 case FLASH_5717VENDOR_ATMEL_ADB011B:
14829 case FLASH_5717VENDOR_ATMEL_ADB011D:
14830 case FLASH_5717VENDOR_ATMEL_MDB021D:
14831 case FLASH_5717VENDOR_ATMEL_ADB021B:
14832 case FLASH_5717VENDOR_ATMEL_ADB021D:
14833 case FLASH_5717VENDOR_ATMEL_45USPT:
14834 tp->nvram_jedecnum = JEDEC_ATMEL;
14835 tg3_flag_set(tp, NVRAM_BUFFERED);
14836 tg3_flag_set(tp, FLASH);
14838 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14839 case FLASH_5717VENDOR_ATMEL_MDB021D:
14840 /* Detect size with tg3_nvram_get_size() */
14842 case FLASH_5717VENDOR_ATMEL_ADB021B:
14843 case FLASH_5717VENDOR_ATMEL_ADB021D:
14844 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14847 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14851 case FLASH_5717VENDOR_ST_M_M25PE10:
14852 case FLASH_5717VENDOR_ST_A_M25PE10:
14853 case FLASH_5717VENDOR_ST_M_M45PE10:
14854 case FLASH_5717VENDOR_ST_A_M45PE10:
14855 case FLASH_5717VENDOR_ST_M_M25PE20:
14856 case FLASH_5717VENDOR_ST_A_M25PE20:
14857 case FLASH_5717VENDOR_ST_M_M45PE20:
14858 case FLASH_5717VENDOR_ST_A_M45PE20:
14859 case FLASH_5717VENDOR_ST_25USPT:
14860 case FLASH_5717VENDOR_ST_45USPT:
14861 tp->nvram_jedecnum = JEDEC_ST;
14862 tg3_flag_set(tp, NVRAM_BUFFERED);
14863 tg3_flag_set(tp, FLASH);
14865 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14866 case FLASH_5717VENDOR_ST_M_M25PE20:
14867 case FLASH_5717VENDOR_ST_M_M45PE20:
14868 /* Detect size with tg3_nvram_get_size() */
14870 case FLASH_5717VENDOR_ST_A_M25PE20:
14871 case FLASH_5717VENDOR_ST_A_M45PE20:
14872 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14875 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor strap: no usable NVRAM. */
14880 tg3_flag_set(tp, NO_NVRAM);
14884 tg3_nvram_get_pagesize(tp, nvcfg1);
14885 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14886 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* NVRAM probe for 5720/5762.  The 5762 gets special pre-processing:
 * Macronix MX25L parts are sized from the autosense status register,
 * and 5762-specific pinstraps are remapped to their 5720 equivalents
 * before the common switch.  The common switch then classifies
 * EEPROM / Atmel flash / ST flash and picks a size.  For the 5762 a
 * final read of word 0 double-checks the image signature and clears
 * NVRAM presence if it doesn't match.
 */
14889 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14891 u32 nvcfg1, nvmpinstrp, nv_status;
14893 nvcfg1 = tr32(NVRAM_CFG1);
14894 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14896 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14897 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14898 tg3_flag_set(tp, NO_NVRAM);
14902 switch (nvmpinstrp) {
14903 case FLASH_5762_MX25L_100:
14904 case FLASH_5762_MX25L_200:
14905 case FLASH_5762_MX25L_400:
14906 case FLASH_5762_MX25L_800:
14907 case FLASH_5762_MX25L_160_320:
14908 tp->nvram_pagesize = 4096;
14909 tp->nvram_jedecnum = JEDEC_MACRONIX;
14910 tg3_flag_set(tp, NVRAM_BUFFERED);
14911 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14912 tg3_flag_set(tp, FLASH);
14913 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
/* Size in MB = 1 << (autosensed device id). */
14915 (1 << (nv_status >> AUTOSENSE_DEVID &
14916 AUTOSENSE_DEVID_MASK)
14917 << AUTOSENSE_SIZE_IN_MB);
14920 case FLASH_5762_EEPROM_HD:
14921 nvmpinstrp = FLASH_5720_EEPROM_HD;
14923 case FLASH_5762_EEPROM_LD:
14924 nvmpinstrp = FLASH_5720_EEPROM_LD;
14926 case FLASH_5720VENDOR_M_ST_M45PE20:
14927 /* This pinstrap supports multiple sizes, so force it
14928 * to read the actual size from location 0xf0.
14930 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14935 switch (nvmpinstrp) {
14936 case FLASH_5720_EEPROM_HD:
14937 case FLASH_5720_EEPROM_LD:
14938 tp->nvram_jedecnum = JEDEC_ATMEL;
14939 tg3_flag_set(tp, NVRAM_BUFFERED);
14941 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14942 tw32(NVRAM_CFG1, nvcfg1);
14943 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14944 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14946 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14948 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14949 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14950 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14951 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14952 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14953 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14954 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14955 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14956 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14957 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14958 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14959 case FLASH_5720VENDOR_ATMEL_45USPT:
14960 tp->nvram_jedecnum = JEDEC_ATMEL;
14961 tg3_flag_set(tp, NVRAM_BUFFERED);
14962 tg3_flag_set(tp, FLASH);
14964 switch (nvmpinstrp) {
14965 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14966 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14967 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14968 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14970 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14971 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14972 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14973 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14975 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14976 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14977 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
/* 5762 keeps the autosensed size; other ASICs default to 128KB. */
14980 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14981 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14985 case FLASH_5720VENDOR_M_ST_M25PE10:
14986 case FLASH_5720VENDOR_M_ST_M45PE10:
14987 case FLASH_5720VENDOR_A_ST_M25PE10:
14988 case FLASH_5720VENDOR_A_ST_M45PE10:
14989 case FLASH_5720VENDOR_M_ST_M25PE20:
14990 case FLASH_5720VENDOR_M_ST_M45PE20:
14991 case FLASH_5720VENDOR_A_ST_M25PE20:
14992 case FLASH_5720VENDOR_A_ST_M45PE20:
14993 case FLASH_5720VENDOR_M_ST_M25PE40:
14994 case FLASH_5720VENDOR_M_ST_M45PE40:
14995 case FLASH_5720VENDOR_A_ST_M25PE40:
14996 case FLASH_5720VENDOR_A_ST_M45PE40:
14997 case FLASH_5720VENDOR_M_ST_M25PE80:
14998 case FLASH_5720VENDOR_M_ST_M45PE80:
14999 case FLASH_5720VENDOR_A_ST_M25PE80:
15000 case FLASH_5720VENDOR_A_ST_M45PE80:
15001 case FLASH_5720VENDOR_ST_25USPT:
15002 case FLASH_5720VENDOR_ST_45USPT:
15003 tp->nvram_jedecnum = JEDEC_ST;
15004 tg3_flag_set(tp, NVRAM_BUFFERED);
15005 tg3_flag_set(tp, FLASH);
15007 switch (nvmpinstrp) {
15008 case FLASH_5720VENDOR_M_ST_M25PE20:
15009 case FLASH_5720VENDOR_M_ST_M45PE20:
15010 case FLASH_5720VENDOR_A_ST_M25PE20:
15011 case FLASH_5720VENDOR_A_ST_M45PE20:
15012 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15014 case FLASH_5720VENDOR_M_ST_M25PE40:
15015 case FLASH_5720VENDOR_M_ST_M45PE40:
15016 case FLASH_5720VENDOR_A_ST_M25PE40:
15017 case FLASH_5720VENDOR_A_ST_M45PE40:
15018 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15020 case FLASH_5720VENDOR_M_ST_M25PE80:
15021 case FLASH_5720VENDOR_M_ST_M45PE80:
15022 case FLASH_5720VENDOR_A_ST_M25PE80:
15023 case FLASH_5720VENDOR_A_ST_M45PE80:
15024 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15027 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15028 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor strap: no usable NVRAM. */
15033 tg3_flag_set(tp, NO_NVRAM);
15037 tg3_nvram_get_pagesize(tp, nvcfg1);
15038 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15039 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15041 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15044 if (tg3_nvram_read(tp, 0, &val))
/* Accept either the plain or firmware magic signature. */
15047 if (val != TG3_EEPROM_MAGIC &&
15048 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15049 tg3_flag_set(tp, NO_NVRAM)
15053 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Top-level NVRAM initialization: resets the serial-EEPROM state
 * machine, enables seeprom access, then (for non-5700/5701 chips)
 * takes the NVRAM lock and dispatches to the per-ASIC probe routine.
 * If a probe leaves nvram_size at 0, the generic sizing path runs.
 * SSB GigE cores have no NVRAM/EEPROM at all and are flagged NO_NVRAM
 * up front; 5700/5701 fall back to legacy EEPROM sizing.
 */
15054 static void tg3_nvram_init(struct tg3 *tp)
15056 if (tg3_flag(tp, IS_SSB_CORE)) {
15057 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15058 tg3_flag_clear(tp, NVRAM);
15059 tg3_flag_clear(tp, NVRAM_BUFFERED);
15060 tg3_flag_set(tp, NO_NVRAM);
/* Reset the EEPROM finite-state machine and program the clock period. */
15064 tw32_f(GRC_EEPROM_ADDR,
15065 (EEPROM_ADDR_FSM_RESET |
15066 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15067 EEPROM_ADDR_CLKPERD_SHIFT)));
15071 /* Enable seeprom accesses. */
15072 tw32_f(GRC_LOCAL_CTRL,
15073 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15076 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15077 tg3_asic_rev(tp) != ASIC_REV_5701) {
15078 tg3_flag_set(tp, NVRAM);
15080 if (tg3_nvram_lock(tp)) {
15081 netdev_warn(tp->dev,
15082 "Cannot get nvram lock, %s failed\n",
15086 tg3_enable_nvram_access(tp);
15088 tp->nvram_size = 0;
/* Dispatch to the ASIC-specific NVRAM probe. */
15090 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15091 tg3_get_5752_nvram_info(tp);
15092 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15093 tg3_get_5755_nvram_info(tp);
15094 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15095 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15096 tg3_asic_rev(tp) == ASIC_REV_5785)
15097 tg3_get_5787_nvram_info(tp);
15098 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15099 tg3_get_5761_nvram_info(tp);
15100 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15101 tg3_get_5906_nvram_info(tp);
15102 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15103 tg3_flag(tp, 57765_CLASS))
15104 tg3_get_57780_nvram_info(tp);
15105 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15106 tg3_asic_rev(tp) == ASIC_REV_5719)
15107 tg3_get_5717_nvram_info(tp);
15108 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15109 tg3_asic_rev(tp) == ASIC_REV_5762)
15110 tg3_get_5720_nvram_info(tp);
15112 tg3_get_nvram_info(tp);
15114 if (tp->nvram_size == 0)
15115 tg3_get_nvram_size(tp);
15117 tg3_disable_nvram_access(tp);
15118 tg3_nvram_unlock(tp);
/* 5700/5701: no NVRAM interface — use legacy EEPROM sizing. */
15121 tg3_flag_clear(tp, NVRAM);
15122 tg3_flag_clear(tp, NVRAM_BUFFERED);
15124 tg3_get_eeprom_size(tp);
/* Maps a PCI (subsystem vendor, subsystem device) pair to a known PHY
 * id; used by tg3_lookup_by_subsys() when the EEPROM has no signature.
 * NOTE(review): the phy_id member and closing brace appear to be
 * elided from this excerpt.
 */
15128 struct subsys_tbl_ent {
15129 u16 subsys_vendor, subsys_devid;
/* Hardcoded (subsystem vendor, subsystem device) -> PHY id table for
 * boards whose EEPROM carries no valid signature.  A phy_id of 0 means
 * the entry matches but no specific PHY id is forced.
 */
15133 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15134 /* Broadcom boards. */
15135 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15136 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15137 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15138 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15139 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15140 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15141 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15142 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15143 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15144 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15145 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15146 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15147 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15148 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15149 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15150 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15151 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15152 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15153 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15154 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15155 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15156 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
15159 { TG3PCI_SUBVENDOR_ID_3COM,
15160 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15161 { TG3PCI_SUBVENDOR_ID_3COM,
15162 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15163 { TG3PCI_SUBVENDOR_ID_3COM,
15164 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15165 { TG3PCI_SUBVENDOR_ID_3COM,
15166 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15167 { TG3PCI_SUBVENDOR_ID_3COM,
15168 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
15171 { TG3PCI_SUBVENDOR_ID_DELL,
15172 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15173 { TG3PCI_SUBVENDOR_ID_DELL,
15174 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15175 { TG3PCI_SUBVENDOR_ID_DELL,
15176 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15177 { TG3PCI_SUBVENDOR_ID_DELL,
15178 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15180 /* Compaq boards. */
15181 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15182 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15183 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15184 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15185 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15186 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15187 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15188 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15189 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15190 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
15193 { TG3PCI_SUBVENDOR_ID_IBM,
15194 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for the device's PCI
 * subsystem vendor/device pair; returns the matching entry.
 * NOTE(review): the no-match return path appears elided here —
 * presumably returns NULL (callers check for NULL); confirm upstream.
 */
15197 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15201 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15202 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15203 tp->pdev->subsystem_vendor) &&
15204 (subsys_id_to_phy_id[i].subsys_devid ==
15205 tp->pdev->subsystem_device))
15206 return &subsys_id_to_phy_id[i];
/* Parse the hardware configuration stored in NIC SRAM (written by
 * bootcode/EEPROM) into driver flags and fields: PHY id and serdes
 * type, LED control mode, write-protect / NIC-vs-LOM status, ASF/APE
 * enables, WoL capability/enable, serdes pre-emphasis, APD, ASPM
 * workaround, RGMII in-band signalling, and 1G-half advertisement
 * disable.  5906 takes an early VCPU-shadow-register path instead.
 * Defaults (onboard, WOL-capable, PHY_1 LED mode) are set first and
 * only overridden when a valid SRAM signature is found.
 */
15211 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15215 tp->phy_id = TG3_PHY_ID_INVALID;
15216 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15218 /* Assume an onboard device and WOL capable by default. */
15219 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15220 tg3_flag_set(tp, WOL_CAP);
15222 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15223 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15224 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15225 tg3_flag_set(tp, IS_NIC);
15227 val = tr32(VCPU_CFGSHDW);
15228 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15229 tg3_flag_set(tp, ASPM_WORKAROUND);
15230 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15231 (val & VCPU_CFGSHDW_WOL_MAGPKT) {
15232 tg3_flag_set(tp, WOL_ENABLE);
15233 device_set_wakeup_enable(&tp->pdev->dev, true);
15238 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15239 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15240 u32 nic_cfg, led_cfg;
15241 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15242 u32 nic_phy_id, ver, eeprom_phy_id;
15243 int eeprom_phy_serdes = 0;
15245 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15246 tp->nic_sram_data_cfg = nic_cfg;
15248 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15249 ver >>= NIC_SRAM_DATA_VER_SHIFT;
/* CFG_2 word only exists in certain bootcode versions. */
15250 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15251 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15252 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15253 (ver > 0) && (ver < 0x100))
15254 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15256 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15257 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15259 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15260 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15261 tg3_asic_rev(tp) == ASIC_REV_5720)
15262 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15264 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15265 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15266 eeprom_phy_serdes = 1;
15268 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15269 if (nic_phy_id != 0) {
15270 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15271 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
/* Repack the SRAM PHY-id fields into the driver's phy_id layout. */
15273 eeprom_phy_id = (id1 >> 16) << 10;
15274 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15275 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15279 tp->phy_id = eeprom_phy_id;
15280 if (eeprom_phy_serdes) {
15281 if (!tg3_flag(tp, 5705_PLUS))
15282 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15284 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15287 if (tg3_flag(tp, 5750_PLUS))
15288 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15289 SHASTA_EXT_LED_MODE_MASK);
15291 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15295 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15296 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15299 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15300 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15303 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15304 tp->led_ctrl = LED_CTRL_MODE_MAC;
15306 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15307 * read on some older 5700/5701 bootcode.
15309 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15310 tg3_asic_rev(tp) == ASIC_REV_5701)
15311 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15315 case SHASTA_EXT_LED_SHARED:
15316 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15317 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15318 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15319 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15320 LED_CTRL_MODE_PHY_2);
15322 if (tg3_flag(tp, 5717_PLUS) ||
15323 tg3_asic_rev(tp) == ASIC_REV_5762)
15324 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15325 LED_CTRL_BLINK_RATE_MASK;
15329 case SHASTA_EXT_LED_MAC:
15330 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15333 case SHASTA_EXT_LED_COMBO:
15334 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15335 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15336 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15337 LED_CTRL_MODE_PHY_2);
/* Board-specific LED overrides. */
15342 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15343 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15344 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15345 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15347 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15348 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15350 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15351 tg3_flag_set(tp, EEPROM_WRITE_PROT);
/* Arima LOMs ship with the WP strap set but need writes enabled. */
15352 if ((tp->pdev->subsystem_vendor ==
15353 PCI_VENDOR_ID_ARIMA) &&
15354 (tp->pdev->subsystem_device == 0x205a ||
15355 tp->pdev->subsystem_device == 0x2063))
15356 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15358 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15359 tg3_flag_set(tp, IS_NIC);
15362 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15363 tg3_flag_set(tp, ENABLE_ASF);
15364 if (tg3_flag(tp, 5750_PLUS))
15365 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15368 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15369 tg3_flag(tp, 5750_PLUS))
15370 tg3_flag_set(tp, ENABLE_APE);
15372 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15373 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15374 tg3_flag_clear(tp, WOL_CAP);
15376 if (tg3_flag(tp, WOL_CAP) &&
15377 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15378 tg3_flag_set(tp, WOL_ENABLE);
15379 device_set_wakeup_enable(&tp->pdev->dev, true);
15382 if (cfg2 & (1 << 17))
15383 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15385 /* serdes signal pre-emphasis in register 0x590 set by */
15386 /* bootcode if bit 18 is set */
15387 if (cfg2 & (1 << 18))
15388 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15390 if ((tg3_flag(tp, 57765_PLUS) ||
15391 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15392 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15393 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15394 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15396 if (tg3_flag(tp, PCI_EXPRESS)) {
15399 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15400 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15401 !tg3_flag(tp, 57765_PLUS) &&
15402 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15403 tg3_flag_set(tp, ASPM_WORKAROUND);
15404 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15405 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15406 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15407 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15410 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15411 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15412 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15413 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15414 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15415 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15417 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15418 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
/* Propagate final WoL state to the PM core. */
15421 if (tg3_flag(tp, WOL_CAP))
15422 device_set_wakeup_enable(&tp->pdev->dev,
15423 tg3_flag(tp, WOL_ENABLE));
15425 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Read one 32-bit word from the APE OTP region at @offset into @val.
 * Takes the NVRAM lock, issues an OTP read command via the APE
 * registers, and polls up to 100 iterations for CMD_DONE before
 * clearing the control register and releasing the lock.  Returns
 * 0 on success, negative errno on lock failure or (per the final
 * CMD_DONE check) on timeout.
 */
15428 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15431 u32 val2, off = offset * 8;
15433 err = tg3_nvram_lock(tp);
15437 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15438 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15439 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15440 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15443 for (i = 0; i < 100; i++) {
15444 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15445 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15446 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15452 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15454 tg3_nvram_unlock(tp);
15455 if (val2 & APE_OTP_STATUS_CMD_DONE)
/* Issue @cmd to the on-chip OTP controller and poll for completion.
 * The command is written twice: first with the START bit, then
 * without.  Polls up to 100 iterations (~1 ms); returns 0 when
 * OTP_STATUS_CMD_DONE is observed, -EBUSY on timeout.
 */
15461 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15466 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15467 tw32(OTP_CTRL, cmd);
15469 /* Wait for up to 1 ms for command to execute. */
15470 for (i = 0; i < 100; i++) {
15471 val = tr32(OTP_STATUS);
15472 if (val & OTP_STATUS_CMD_DONE)
15477 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15480 /* Read the gphy configuration from the OTP region of the chip. The gphy
15481 * configuration is a 32-bit value that straddles the alignment boundary.
15482 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged value; the early bail-outs on failed OTP commands
 * appear elided here (presumably return 0 — confirm upstream).
 */
15484 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15486 u32 bhalf_otp, thalf_otp;
15488 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15490 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15493 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15495 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15498 thalf_otp = tr32(OTP_READ_DATA);
15500 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15502 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15505 bhalf_otp = tr32(OTP_READ_DATA);
/* Low 16 bits of the first read are the top half of the result. */
15507 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to "autoneg everything we support":
 * gigabit modes unless the PHY is 10/100-only (1000HD also gated by
 * the DISABLE_1G_HD_ADV flag), 10/100 modes unless it is a serdes
 * device, plus the FIBRE advertisement bit otherwise.  Speed/duplex
 * start as UNKNOWN with autoneg enabled.
 */
15510 static void tg3_phy_init_link_config(struct tg3 *tp)
15512 u32 adv = ADVERTISED_Autoneg;
15514 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15515 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15516 adv |= ADVERTISED_1000baseT_Half;
15517 adv |= ADVERTISED_1000baseT_Full;
15520 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15521 adv |= ADVERTISED_100baseT_Half |
15522 ADVERTISED_100baseT_Full |
15523 ADVERTISED_10baseT_Half |
15524 ADVERTISED_10baseT_Full |
15527 adv |= ADVERTISED_FIBRE;
15529 tp->link_config.advertising = adv;
15530 tp->link_config.speed = SPEED_UNKNOWN;
15531 tp->link_config.duplex = DUPLEX_UNKNOWN;
15532 tp->link_config.autoneg = AUTONEG_ENABLE;
15533 tp->link_config.active_speed = SPEED_UNKNOWN;
15534 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
/* tg3_phy_probe() - identify the PHY attached to this port and set up
 * the initial phy_flags and link configuration.
 *
 * Reads the MII ID registers (unless ASF/APE firmware owns the PHY, in
 * which case the ID from the eeprom config or the subsystem-ID table is
 * used), flags serdes parts, enables EEE where the ASIC supports it, and
 * optionally resets/autonegotiates the PHY.  Returns 0 or a negative
 * errno from the PHY helpers.
 */
15539 static int tg3_phy_probe(struct tg3 *tp)
15541 u32 hw_phy_id_1, hw_phy_id_2;
15542 u32 hw_phy_id, hw_phy_id_masked;
15545 /* flow control autonegotiation is default behavior */
15546 tg3_flag_set(tp, PAUSE_AUTONEG);
15547 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Pick the APE lock that matches this PCI function so driver and APE
 * firmware serialize their accesses to the same PHY (case labels are
 * truncated in this view). */
15549 if (tg3_flag(tp, ENABLE_APE)) {
15550 switch (tp->pci_fn) {
15552 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15555 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15558 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15561 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
/* Without ASF there is no management traffic to keep alive, so drop the
 * low-power link-preservation flags for plain copper gigabit PHYs. */
15566 if (!tg3_flag(tp, ENABLE_ASF) &&
15567 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15568 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15569 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15570 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
/* phylib takes over the whole probe when enabled. */
15572 if (tg3_flag(tp, USE_PHYLIB))
15573 return tg3_phy_init(tp);
15575 /* Reading the PHY ID register can conflict with ASF
15576 * firmware access to the PHY hardware.
15579 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15580 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15582 /* Now read the physical PHY_ID from the chip and verify
15583 * that it is sane. If it doesn't look good, we fall back
15584 * to either the hard-coded table based PHY_ID and failing
15585 * that the value found in the eeprom area.
/* NOTE(review): 'err' is declared on a line dropped from this view. */
15587 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15588 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack the two 16-bit MII ID registers into the driver's internal
 * TG3_PHY_ID encoding (the << 10 / << 16 packing is intentional and
 * matches the TG3_PHY_ID_* constants, not the raw IEEE OUI layout). */
15590 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15591 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15592 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15594 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15597 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15598 tp->phy_id = hw_phy_id;
/* BCM8002 is the one known serdes part among the readable IDs. */
15599 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15600 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15602 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15604 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15605 /* Do nothing, phy ID already set up in
15606 * tg3_get_eeprom_hw_cfg().
15609 struct subsys_tbl_ent *p;
15611 /* No eeprom signature? Try the hardcoded
15612 * subsys device table.
15614 p = tg3_lookup_by_subsys(tp);
15616 tp->phy_id = p->phy_id;
15617 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15618 /* For now we saw the IDs 0xbc050cd0,
15619 * 0xbc050f80 and 0xbc050c30 on devices
15620 * connected to an BCM4785 and there are
15621 * probably more. Just assume that the phy is
15622 * supported when it is connected to a SSB core
15629 tp->phy_id == TG3_PHY_ID_BCM8002)
15630 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE is only advertised on the ASIC revs that implement it (and not
 * on the A0 steppings of 5717/57765, explicitly excluded below). */
15634 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15635 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15636 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15637 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15638 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15639 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15640 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15641 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15642 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15643 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15645 tp->eee.supported = SUPPORTED_100baseT_Full |
15646 SUPPORTED_1000baseT_Full;
15647 tp->eee.advertised = ADVERTISED_100baseT_Full |
15648 ADVERTISED_1000baseT_Full;
15649 tp->eee.eee_enabled = 1;
15650 tp->eee.tx_lpi_enabled = 1;
15651 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15654 tg3_phy_init_link_config(tp);
/* If nothing (firmware or power policy) needs the link held up, reset
 * the PHY and kick off autonegotiation ourselves. */
15656 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15657 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15658 !tg3_flag(tp, ENABLE_APE) &&
15659 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR is read twice because the link-status bit is latched-low; the
 * first read clears the stale latch, the second gives current state. */
15662 tg3_readphy(tp, MII_BMSR, &bmsr);
15663 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15664 (bmsr & BMSR_LSTATUS))
15665 goto skip_phy_reset;
15667 err = tg3_phy_reset(tp);
15671 tg3_phy_set_wirespeed(tp);
/* Re-program advertisement and restart autoneg only when the current
 * PHY configuration does not already match what we want. */
15673 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15674 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15675 tp->link_config.flowctrl);
15677 tg3_writephy(tp, MII_BMCR,
15678 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 needs its DSP coefficients loaded; the second call at 15688
 * appears to be a retry path — the lines between are truncated here. */
15683 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15684 err = tg3_init_5401phy_dsp(tp);
15688 err = tg3_init_5401phy_dsp(tp);
/* tg3_read_vpd() - extract the board part number (and, on Dell boards,
 * a firmware-version prefix) from the device's PCI VPD block.
 *
 * Falls back to a hardcoded per-device-ID part number when VPD is
 * absent or unusable.  The goto labels (out_not_found etc.) target
 * lines truncated from this view.
 */
15694 static void tg3_read_vpd(struct tg3 *tp)
15697 unsigned int len, vpdlen;
15700 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
/* Manufacturer-ID keyword: "1028" (Dell's PCI vendor ID in ASCII)
 * gates the vendor-specific firmware string below. */
15704 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15705 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15709 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15712 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15713 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
/* Seed fw_ver with the VPD vendor string; "bc" (bootcode) version is
 * appended later by tg3_read_bc_ver(). */
15717 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15718 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15721 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15722 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15724 goto out_not_found;
/* Reject part numbers that would overflow the fixed-size buffer. */
15726 if (len > TG3_BPN_SIZE)
15727 goto out_not_found;
15729 memcpy(tp->board_part_number, &vpd_data[i], len);
15733 if (tp->board_part_number[0])
/* VPD had no usable part number: synthesize one from the PCI device ID
 * for the ASIC families that ship without VPD part numbers. */
15737 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15738 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15739 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15740 strcpy(tp->board_part_number, "BCM5717");
15741 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15742 strcpy(tp->board_part_number, "BCM5718");
15745 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15746 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15747 strcpy(tp->board_part_number, "BCM57780");
15748 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15749 strcpy(tp->board_part_number, "BCM57760");
15750 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15751 strcpy(tp->board_part_number, "BCM57790");
15752 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15753 strcpy(tp->board_part_number, "BCM57788");
15756 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15757 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15758 strcpy(tp->board_part_number, "BCM57761");
15759 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15760 strcpy(tp->board_part_number, "BCM57765");
15761 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15762 strcpy(tp->board_part_number, "BCM57781");
15763 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15764 strcpy(tp->board_part_number, "BCM57785");
15765 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15766 strcpy(tp->board_part_number, "BCM57791");
15767 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15768 strcpy(tp->board_part_number, "BCM57795");
15771 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15772 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15773 strcpy(tp->board_part_number, "BCM57762");
15774 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15775 strcpy(tp->board_part_number, "BCM57766");
15776 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15777 strcpy(tp->board_part_number, "BCM57782");
15778 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15779 strcpy(tp->board_part_number, "BCM57786");
15782 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15783 strcpy(tp->board_part_number, "BCM95906");
/* Last resort when no mapping applies. */
15786 strcpy(tp->board_part_number, "none");
/* tg3_fw_img_is_valid() - sanity-check a firmware image header in NVRAM.
 * An image is rejected when either header word fails to read or the top
 * six bits of the first word do not carry the 0x0c000000 signature.
 * NOTE(review): the return statements are truncated from this view;
 * code text left byte-identical.
 */
15790 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15794 if (tg3_nvram_read(tp, offset, &val) ||
15795 (val & 0xfc000000) != 0x0c000000 ||
15796 tg3_nvram_read(tp, offset + 4, &val) ||
/* tg3_read_bc_ver() - append the bootcode version to tp->fw_ver.
 *
 * Newer images embed a 16-byte version string addressed via the image
 * header; older images only expose a packed major/minor word at
 * TG3_NVM_PTREV_BCVER, formatted as "vM.mm".
 */
15803 static void tg3_read_bc_ver(struct tg3 *tp)
15805 u32 val, offset, start, ver_offset;
15807 bool newver = false;
/* Image pointer at NVRAM 0xc, load address at 0x4. */
15809 if (tg3_nvram_read(tp, 0xc, &offset) ||
15810 tg3_nvram_read(tp, 0x4, &start))
15813 offset = tg3_nvram_logical_addr(tp, offset);
15815 if (tg3_nvram_read(tp, offset, &val))
/* Same 0x0c000000 signature check as tg3_fw_img_is_valid(); when it
 * matches, the image carries an embedded version string ("newver"). */
15818 if ((val & 0xfc000000) == 0x0c000000) {
15819 if (tg3_nvram_read(tp, offset + 4, &val))
15826 dst_off = strlen(tp->fw_ver);
/* Need room for the full 16-byte string before copying. */
15829 if (TG3_VER_SIZE - dst_off < 16 ||
15830 tg3_nvram_read(tp, offset + 8, &ver_offset))
/* ver_offset is image-relative; rebase it to the NVRAM offset. */
15833 offset = offset + ver_offset - start;
15834 for (i = 0; i < 16; i += 4) {
15836 if (tg3_nvram_read_be32(tp, offset + i, &v))
15839 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Legacy path: packed major/minor version word. */
15844 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15847 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15848 TG3_NVM_BCVER_MAJSFT;
15849 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15850 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15851 "v%d.%02d", major, minor);
/* tg3_read_hwsb_ver() - format the hardware-selfboot version into
 * tp->fw_ver as "sb vM.mm", from the packed major/minor fields in the
 * NVRAM HWSB config word.
 */
15855 static void tg3_read_hwsb_ver(struct tg3 *tp)
15857 u32 val, major, minor;
15859 /* Use native endian representation */
15860 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15863 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15864 TG3_NVM_HWSB_CFG1_MAJSFT;
15865 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15866 TG3_NVM_HWSB_CFG1_MINSFT;
/* NOTE(review): size is hardcoded to 32 rather than TG3_VER_SIZE —
 * assumed 32 <= TG3_VER_SIZE, verify against the struct definition. */
15868 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* tg3_read_sb_ver() - append the EEPROM selfboot version to tp->fw_ver.
 *
 * @val: the NVRAM magic/format word already read by the caller.
 * Only format-1 images are decoded; the edition word offset depends on
 * the image revision.  Output is "sb vM.mm" plus an optional build
 * letter ('a'..'z' for builds 1..26).
 */
15871 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15873 u32 offset, major, minor, build;
15875 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15877 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Per-revision location of the edition (build/major/minor) word.
 * NOTE(review): 'break's and the default arm are truncated here. */
15880 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15881 case TG3_EEPROM_SB_REVISION_0:
15882 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15884 case TG3_EEPROM_SB_REVISION_2:
15885 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15887 case TG3_EEPROM_SB_REVISION_3:
15888 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15890 case TG3_EEPROM_SB_REVISION_4:
15891 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15893 case TG3_EEPROM_SB_REVISION_5:
15894 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15896 case TG3_EEPROM_SB_REVISION_6:
15897 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15903 if (tg3_nvram_read(tp, offset, &val))
15906 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15907 TG3_EEPROM_SB_EDH_BLD_SHFT;
15908 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15909 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15910 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Reject out-of-range fields; build must fit the a-z letter scheme. */
15912 if (minor > 99 || build > 26)
15915 offset = strlen(tp->fw_ver);
15916 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15917 " v%d.%02d", major, minor);
/* Append the build letter, guarded so the NUL terminator survives. */
15920 offset = strlen(tp->fw_ver);
15921 if (offset < TG3_VER_SIZE - 1)
15922 tp->fw_ver[offset] = 'a' + build - 1;
/* tg3_read_mgmtfw_ver() - append the ASF management-firmware version.
 *
 * Walks the NVRAM directory for an ASF-init entry, validates the image
 * it points at, then copies up to 16 bytes of version string into
 * tp->fw_ver after a ", " separator (clamped to TG3_VER_SIZE).
 */
15926 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15928 u32 val, offset, start;
/* Scan directory entries for the ASF-init type tag. */
15931 for (offset = TG3_NVM_DIR_START;
15932 offset < TG3_NVM_DIR_END;
15933 offset += TG3_NVM_DIRENT_SIZE) {
15934 if (tg3_nvram_read(tp, offset, &val))
15937 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
/* Loop fell through without a match: no ASF image present. */
15941 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load base; later parts store it in the
 * word preceding the directory entry. */
15944 if (!tg3_flag(tp, 5705_PLUS))
15945 start = 0x08000000;
15946 else if (tg3_nvram_read(tp, offset - 4, &start))
15949 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15950 !tg3_fw_img_is_valid(tp, offset) ||
15951 tg3_nvram_read(tp, offset + 8, &val))
/* Rebase the image-relative version pointer to an NVRAM offset. */
15954 offset += val - start;
15956 vlen = strlen(tp->fw_ver);
15958 tp->fw_ver[vlen++] = ',';
15959 tp->fw_ver[vlen++] = ' ';
/* Copy up to four big-endian words of version text. */
15961 for (i = 0; i < 4; i++) {
15963 if (tg3_nvram_read_be32(tp, offset, &v))
15966 offset += sizeof(v);
/* Partial copy when the destination is nearly full, then stop. */
15968 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15969 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15973 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* tg3_probe_ncsi() - detect NC-SI support in the APE firmware.
 * Requires a valid APE segment signature and a READY firmware status;
 * sets APE_HAS_NCSI when the firmware advertises the NCSI feature bit.
 */
15978 static void tg3_probe_ncsi(struct tg3 *tp)
15982 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15983 if (apedata != APE_SEG_SIG_MAGIC)
15986 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15987 if (!(apedata & APE_FW_STATUS_READY))
15990 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15991 tg3_flag_set(tp, APE_HAS_NCSI);
/* tg3_read_dash_ver() - append the APE management firmware version
 * (" <type> vA.B.C.D") to tp->fw_ver.  The firmware-type label chosen
 * by the if/else ladder below (NCSI vs. others) is assigned on lines
 * truncated from this view — presumably into the fwtype used by the
 * snprintf; verify against the full source.
 */
15994 static void tg3_read_dash_ver(struct tg3 *tp)
16000 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16002 if (tg3_flag(tp, APE_HAS_NCSI))
16004 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16009 vlen = strlen(tp->fw_ver);
/* Unpack major/minor/revision/build from the packed version word. */
16011 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16013 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16014 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16015 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16016 (apedata & APE_FW_VERSION_BLDMSK));
/* tg3_read_otp_ver() - append an OTP-derived version suffix (" .NN")
 * to tp->fw_ver.  Applies to the 5762 ASIC only; reads two OTP words,
 * validates the magic, and scans the low 7 bytes of the combined 64-bit
 * value for the last non-zero byte to use as the version number.
 */
16019 static void tg3_read_otp_ver(struct tg3 *tp)
16023 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16026 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16027 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16028 TG3_OTP_MAGIC0_VALID(val)) {
16029 u64 val64 = (u64) val << 32 | val2;
/* Walk byte-by-byte; a zero byte terminates the scan (the shift that
 * advances val64 each iteration is truncated from this view). */
16033 for (i = 0; i < 7; i++) {
16034 if ((val64 & 0xff) == 0)
16036 ver = val64 & 0xff;
16039 vlen = strlen(tp->fw_ver);
16040 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
/* tg3_read_fw_ver() - populate tp->fw_ver from whichever firmware
 * sources the board exposes.
 *
 * Order: bail if VPD already filled it; for NVRAM-less boards report
 * "sb" plus the OTP suffix; otherwise dispatch on the NVRAM magic to
 * the bootcode / EEPROM-selfboot / HW-selfboot readers, then append
 * the management-firmware (ASF/APE) version.  Always NUL-terminates.
 */
16044 static void tg3_read_fw_ver(struct tg3 *tp)
/* NOTE(review): vpd_vers is only ever set false in this view — the line
 * that sets it true (when VPD provided a version) is truncated. */
16047 bool vpd_vers = false;
16049 if (tp->fw_ver[0] != 0)
16052 if (tg3_flag(tp, NO_NVRAM)) {
16053 strcat(tp->fw_ver, "sb");
16054 tg3_read_otp_ver(tp);
16058 if (tg3_nvram_read(tp, 0, &val))
/* The NVRAM magic word distinguishes the three image layouts. */
16061 if (val == TG3_EEPROM_MAGIC)
16062 tg3_read_bc_ver(tp);
16063 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16064 tg3_read_sb_ver(tp, val);
16065 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16066 tg3_read_hwsb_ver(tp);
/* Management firmware: APE (DASH/NCSI) takes precedence over the
 * NVRAM-resident ASF image. */
16068 if (tg3_flag(tp, ENABLE_ASF)) {
16069 if (tg3_flag(tp, ENABLE_APE)) {
16070 tg3_probe_ncsi(tp);
16072 tg3_read_dash_ver(tp);
16073 } else if (!vpd_vers) {
16074 tg3_read_mgmtfw_ver(tp);
/* Guarantee termination regardless of which writers ran. */
16078 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
/* tg3_rx_ret_ring_size() - RX return ring entry count for this chip:
 * the large-ring size on LRG_PROD_RING_CAP parts, the 5700 size on
 * jumbo-capable non-5780-class parts, the 5705 size otherwise.
 */
16081 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16083 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16084 return TG3_RX_RET_MAX_SIZE_5717;
16085 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16086 return TG3_RX_RET_MAX_SIZE_5700;
16088 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to the mailbox registers;
 * matched with pci_dev_present() in tg3_get_invariants() to decide
 * whether MBOX_WRITE_REORDER (read-back after every mailbox write) is
 * needed.
 */
16091 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16092 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16093 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16094 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
/* tg3_find_peer() - locate the sibling PCI function of a dual-port
 * device by scanning the other functions in the same slot.
 * NOTE(review): the tail of this function (single-port fallback and the
 * pci_dev_put/return handling described by the comments below) is
 * truncated from this view; code text left byte-identical.
 */
16098 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16100 struct pci_dev *peer;
/* devfn with the function bits masked off = slot base. */
16101 unsigned int func, devnr = tp->pdev->devfn & ~7;
16103 for (func = 0; func < 8; func++) {
16104 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16105 if (peer && peer != tp->pdev)
16109 /* 5704 can be configured in single-port mode, set peer to
16110 * tp->pdev in that case.
16118 * We don't need to keep the refcount elevated; there's no way
16119 * to remove one half of this device without removing the other
/* tg3_detect_asic_rev() - determine the chip revision ID and derive the
 * family-membership flags from it.
 *
 * @misc_ctrl_reg: value of TG3PCI_MISC_HOST_CTRL read by the caller.
 * Newer chips report ASIC_REV_USE_PROD_ID_REG there and store the real
 * revision in a product-ID config register whose address depends on the
 * device ID.  The family flags form a strict hierarchy:
 * 5717_PLUS ⊆ 57765_PLUS ⊆ 5755_PLUS ⊆ 5750_PLUS ⊆ 5705_PLUS.
 */
16126 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16128 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16129 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16132 /* All devices that use the alternate
16133 * ASIC REV location have a CPMU.
16135 tg3_flag_set(tp, CPMU_PRESENT);
/* Select which product-ID register holds the real ASIC rev:
 * GEN2 devices, GEN15 devices, or the original location. */
16137 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16138 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16139 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16140 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16141 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16143 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16144 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16145 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16146 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16147 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16148 reg = TG3PCI_GEN2_PRODID_ASICREV;
16149 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16150 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16151 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16152 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16159 reg = TG3PCI_GEN15_PRODID_ASICREV;
16161 reg = TG3PCI_PRODID_ASICREV;
16163 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16166 /* Wrong chip ID in 5752 A0. This code can be removed later
16167 * as A0 is not in production.
16169 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16170 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
/* 5717 C0 is identified as (and treated like) 5720 A0. */
16172 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16173 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
/* Family flag derivation; each later test builds on the earlier ones. */
16175 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16176 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16177 tg3_asic_rev(tp) == ASIC_REV_5720)
16178 tg3_flag_set(tp, 5717_PLUS);
16180 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16181 tg3_asic_rev(tp) == ASIC_REV_57766)
16182 tg3_flag_set(tp, 57765_CLASS);
16184 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16185 tg3_asic_rev(tp) == ASIC_REV_5762)
16186 tg3_flag_set(tp, 57765_PLUS);
16188 /* Intentionally exclude ASIC_REV_5906 */
16189 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16190 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16191 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16192 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16193 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16194 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16195 tg3_flag(tp, 57765_PLUS))
16196 tg3_flag_set(tp, 5755_PLUS);
16198 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16199 tg3_asic_rev(tp) == ASIC_REV_5714)
16200 tg3_flag_set(tp, 5780_CLASS);
16202 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16203 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16204 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16205 tg3_flag(tp, 5755_PLUS) ||
16206 tg3_flag(tp, 5780_CLASS))
16207 tg3_flag_set(tp, 5750_PLUS);
16209 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16210 tg3_flag(tp, 5750_PLUS))
16211 tg3_flag_set(tp, 5705_PLUS);
/* tg3_10_100_only_device() - decide whether this board is limited to
 * 10/100 Mbps: true for FET-PHY parts and specific 5703 board IDs, and
 * for devices whose PCI-table driver_data carries the 10_100_ONLY flag
 * (with an extra 5705-specific qualifier).
 * NOTE(review): the return statements are truncated from this view;
 * code text left byte-identical.
 */
16214 static bool tg3_10_100_only_device(struct tg3 *tp,
16215 const struct pci_device_id *ent)
16217 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16219 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16220 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16221 (tp->phy_flags & TG3_PHYFLG_IS_FET))
/* Flag from the PCI device table entry that matched at probe time. */
16224 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16225 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16226 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16236 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16239 u32 pci_state_reg, grc_misc_cfg;
16244 /* Force memory write invalidate off. If we leave it on,
16245 * then on 5700_BX chips we have to enable a workaround.
16246 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16247 * to match the cacheline size. The Broadcom driver have this
16248 * workaround but turns MWI off all the times so never uses
16249 * it. This seems to suggest that the workaround is insufficient.
16251 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16252 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16253 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16255 /* Important! -- Make sure register accesses are byteswapped
16256 * correctly. Also, for those chips that require it, make
16257 * sure that indirect register accesses are enabled before
16258 * the first operation.
16260 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16262 tp->misc_host_ctrl |= (misc_ctrl_reg &
16263 MISC_HOST_CTRL_CHIPREV);
16264 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16265 tp->misc_host_ctrl);
16267 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16269 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16270 * we need to disable memory and use config. cycles
16271 * only to access all registers. The 5702/03 chips
16272 * can mistakenly decode the special cycles from the
16273 * ICH chipsets as memory write cycles, causing corruption
16274 * of register and memory space. Only certain ICH bridges
16275 * will drive special cycles with non-zero data during the
16276 * address phase which can fall within the 5703's address
16277 * range. This is not an ICH bug as the PCI spec allows
16278 * non-zero address during special cycles. However, only
16279 * these ICH bridges are known to drive non-zero addresses
16280 * during special cycles.
16282 * Since special cycles do not cross PCI bridges, we only
16283 * enable this workaround if the 5703 is on the secondary
16284 * bus of these ICH bridges.
16286 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16287 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16288 static struct tg3_dev_id {
16292 } ich_chipsets[] = {
16293 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16295 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16297 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16299 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16303 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16304 struct pci_dev *bridge = NULL;
16306 while (pci_id->vendor != 0) {
16307 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16313 if (pci_id->rev != PCI_ANY_ID) {
16314 if (bridge->revision > pci_id->rev)
16317 if (bridge->subordinate &&
16318 (bridge->subordinate->number ==
16319 tp->pdev->bus->number)) {
16320 tg3_flag_set(tp, ICH_WORKAROUND);
16321 pci_dev_put(bridge);
16327 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16328 static struct tg3_dev_id {
16331 } bridge_chipsets[] = {
16332 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16333 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16336 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16337 struct pci_dev *bridge = NULL;
16339 while (pci_id->vendor != 0) {
16340 bridge = pci_get_device(pci_id->vendor,
16347 if (bridge->subordinate &&
16348 (bridge->subordinate->number <=
16349 tp->pdev->bus->number) &&
16350 (bridge->subordinate->busn_res.end >=
16351 tp->pdev->bus->number)) {
16352 tg3_flag_set(tp, 5701_DMA_BUG);
16353 pci_dev_put(bridge);
16359 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16360 * DMA addresses > 40-bit. This bridge may have other additional
16361 * 57xx devices behind it in some 4-port NIC designs for example.
16362 * Any tg3 device found behind the bridge will also need the 40-bit
16365 if (tg3_flag(tp, 5780_CLASS)) {
16366 tg3_flag_set(tp, 40BIT_DMA_BUG);
16367 tp->msi_cap = tp->pdev->msi_cap;
16369 struct pci_dev *bridge = NULL;
16372 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16373 PCI_DEVICE_ID_SERVERWORKS_EPB,
16375 if (bridge && bridge->subordinate &&
16376 (bridge->subordinate->number <=
16377 tp->pdev->bus->number) &&
16378 (bridge->subordinate->busn_res.end >=
16379 tp->pdev->bus->number)) {
16380 tg3_flag_set(tp, 40BIT_DMA_BUG);
16381 pci_dev_put(bridge);
16387 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16388 tg3_asic_rev(tp) == ASIC_REV_5714)
16389 tp->pdev_peer = tg3_find_peer(tp);
16391 /* Determine TSO capabilities */
16392 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16393 ; /* Do nothing. HW bug. */
16394 else if (tg3_flag(tp, 57765_PLUS))
16395 tg3_flag_set(tp, HW_TSO_3);
16396 else if (tg3_flag(tp, 5755_PLUS) ||
16397 tg3_asic_rev(tp) == ASIC_REV_5906)
16398 tg3_flag_set(tp, HW_TSO_2);
16399 else if (tg3_flag(tp, 5750_PLUS)) {
16400 tg3_flag_set(tp, HW_TSO_1);
16401 tg3_flag_set(tp, TSO_BUG);
16402 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16403 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16404 tg3_flag_clear(tp, TSO_BUG);
16405 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16406 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16407 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16408 tg3_flag_set(tp, FW_TSO);
16409 tg3_flag_set(tp, TSO_BUG);
16410 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16411 tp->fw_needed = FIRMWARE_TG3TSO5;
16413 tp->fw_needed = FIRMWARE_TG3TSO;
16416 /* Selectively allow TSO based on operating conditions */
16417 if (tg3_flag(tp, HW_TSO_1) ||
16418 tg3_flag(tp, HW_TSO_2) ||
16419 tg3_flag(tp, HW_TSO_3) ||
16420 tg3_flag(tp, FW_TSO)) {
16421 /* For firmware TSO, assume ASF is disabled.
16422 * We'll disable TSO later if we discover ASF
16423 * is enabled in tg3_get_eeprom_hw_cfg().
16425 tg3_flag_set(tp, TSO_CAPABLE);
16427 tg3_flag_clear(tp, TSO_CAPABLE);
16428 tg3_flag_clear(tp, TSO_BUG);
16429 tp->fw_needed = NULL;
16432 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16433 tp->fw_needed = FIRMWARE_TG3;
16435 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16436 tp->fw_needed = FIRMWARE_TG357766;
16440 if (tg3_flag(tp, 5750_PLUS)) {
16441 tg3_flag_set(tp, SUPPORT_MSI);
16442 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16443 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16444 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16445 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16446 tp->pdev_peer == tp->pdev))
16447 tg3_flag_clear(tp, SUPPORT_MSI);
16449 if (tg3_flag(tp, 5755_PLUS) ||
16450 tg3_asic_rev(tp) == ASIC_REV_5906) {
16451 tg3_flag_set(tp, 1SHOT_MSI);
16454 if (tg3_flag(tp, 57765_PLUS)) {
16455 tg3_flag_set(tp, SUPPORT_MSIX);
16456 tp->irq_max = TG3_IRQ_MAX_VECS;
16462 if (tp->irq_max > 1) {
16463 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16464 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16466 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16467 tg3_asic_rev(tp) == ASIC_REV_5720)
16468 tp->txq_max = tp->irq_max - 1;
16471 if (tg3_flag(tp, 5755_PLUS) ||
16472 tg3_asic_rev(tp) == ASIC_REV_5906)
16473 tg3_flag_set(tp, SHORT_DMA_BUG);
16475 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16476 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16478 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16479 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16480 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16481 tg3_asic_rev(tp) == ASIC_REV_5762)
16482 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16484 if (tg3_flag(tp, 57765_PLUS) &&
16485 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16486 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16488 if (!tg3_flag(tp, 5705_PLUS) ||
16489 tg3_flag(tp, 5780_CLASS) ||
16490 tg3_flag(tp, USE_JUMBO_BDFLAG))
16491 tg3_flag_set(tp, JUMBO_CAPABLE);
16493 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16496 if (pci_is_pcie(tp->pdev)) {
16499 tg3_flag_set(tp, PCI_EXPRESS);
16501 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16502 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16503 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16504 tg3_flag_clear(tp, HW_TSO_2);
16505 tg3_flag_clear(tp, TSO_CAPABLE);
16507 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16508 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16509 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16510 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16511 tg3_flag_set(tp, CLKREQ_BUG);
16512 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16513 tg3_flag_set(tp, L1PLLPD_EN);
16515 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16516 /* BCM5785 devices are effectively PCIe devices, and should
16517 * follow PCIe codepaths, but do not have a PCIe capabilities
16520 tg3_flag_set(tp, PCI_EXPRESS);
16521 } else if (!tg3_flag(tp, 5705_PLUS) ||
16522 tg3_flag(tp, 5780_CLASS)) {
16523 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16524 if (!tp->pcix_cap) {
16525 dev_err(&tp->pdev->dev,
16526 "Cannot find PCI-X capability, aborting\n");
16530 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16531 tg3_flag_set(tp, PCIX_MODE);
16534 /* If we have an AMD 762 or VIA K8T800 chipset, write
16535 * reordering to the mailbox registers done by the host
16536 * controller can cause major troubles. We read back from
16537 * every mailbox register write to force the writes to be
16538 * posted to the chip in order.
16540 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16541 !tg3_flag(tp, PCI_EXPRESS))
16542 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16544 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16545 &tp->pci_cacheline_sz);
16546 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16547 &tp->pci_lat_timer);
16548 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16549 tp->pci_lat_timer < 64) {
16550 tp->pci_lat_timer = 64;
16551 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16552 tp->pci_lat_timer);
16555 /* Important! -- It is critical that the PCI-X hw workaround
16556 * situation is decided before the first MMIO register access.
16558 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16559 /* 5700 BX chips need to have their TX producer index
16560 * mailboxes written twice to workaround a bug.
16562 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16564 /* If we are in PCI-X mode, enable register write workaround.
16566 * The workaround is to use indirect register accesses
16567 * for all chip writes not to mailbox registers.
16569 if (tg3_flag(tp, PCIX_MODE)) {
16572 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16574 /* The chip can have it's power management PCI config
16575 * space registers clobbered due to this bug.
16576 * So explicitly force the chip into D0 here.
16578 pci_read_config_dword(tp->pdev,
16579 tp->pdev->pm_cap + PCI_PM_CTRL,
16581 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16582 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16583 pci_write_config_dword(tp->pdev,
16584 tp->pdev->pm_cap + PCI_PM_CTRL,
16587 /* Also, force SERR#/PERR# in PCI command. */
16588 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16589 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16590 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16594 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16595 tg3_flag_set(tp, PCI_HIGH_SPEED);
16596 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16597 tg3_flag_set(tp, PCI_32BIT);
16599 /* Chip-specific fixup from Broadcom driver */
16600 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16601 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16602 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16603 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16606 /* Default fast path register access methods */
16607 tp->read32 = tg3_read32;
16608 tp->write32 = tg3_write32;
16609 tp->read32_mbox = tg3_read32;
16610 tp->write32_mbox = tg3_write32;
16611 tp->write32_tx_mbox = tg3_write32;
16612 tp->write32_rx_mbox = tg3_write32;
16614 /* Various workaround register access methods */
16615 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16616 tp->write32 = tg3_write_indirect_reg32;
16617 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16618 (tg3_flag(tp, PCI_EXPRESS) &&
16619 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16621 * Back to back register writes can cause problems on these
16622 * chips, the workaround is to read back all reg writes
16623 * except those to mailbox regs.
16625 * See tg3_write_indirect_reg32().
16627 tp->write32 = tg3_write_flush_reg32;
16630 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16631 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16632 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16633 tp->write32_rx_mbox = tg3_write_flush_reg32;
16636 if (tg3_flag(tp, ICH_WORKAROUND)) {
16637 tp->read32 = tg3_read_indirect_reg32;
16638 tp->write32 = tg3_write_indirect_reg32;
16639 tp->read32_mbox = tg3_read_indirect_mbox;
16640 tp->write32_mbox = tg3_write_indirect_mbox;
16641 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16642 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16647 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16648 pci_cmd &= ~PCI_COMMAND_MEMORY;
16649 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16651 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16652 tp->read32_mbox = tg3_read32_mbox_5906;
16653 tp->write32_mbox = tg3_write32_mbox_5906;
16654 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16655 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16658 if (tp->write32 == tg3_write_indirect_reg32 ||
16659 (tg3_flag(tp, PCIX_MODE) &&
16660 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16661 tg3_asic_rev(tp) == ASIC_REV_5701)))
16662 tg3_flag_set(tp, SRAM_USE_CONFIG);
16664 /* The memory arbiter has to be enabled in order for SRAM accesses
16665 * to succeed. Normally on powerup the tg3 chip firmware will make
16666 * sure it is enabled, but other entities such as system netboot
16667 * code might disable it.
16669 val = tr32(MEMARB_MODE);
16670 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16672 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16673 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16674 tg3_flag(tp, 5780_CLASS)) {
16675 if (tg3_flag(tp, PCIX_MODE)) {
16676 pci_read_config_dword(tp->pdev,
16677 tp->pcix_cap + PCI_X_STATUS,
16679 tp->pci_fn = val & 0x7;
16681 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16682 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16683 tg3_asic_rev(tp) == ASIC_REV_5720) {
16684 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16685 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16686 val = tr32(TG3_CPMU_STATUS);
16688 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16689 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16691 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16692 TG3_CPMU_STATUS_FSHFT_5719;
16695 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16696 tp->write32_tx_mbox = tg3_write_flush_reg32;
16697 tp->write32_rx_mbox = tg3_write_flush_reg32;
16700 /* Get eeprom hw config before calling tg3_set_power_state().
16701 * In particular, the TG3_FLAG_IS_NIC flag must be
16702 * determined before calling tg3_set_power_state() so that
16703 * we know whether or not to switch out of Vaux power.
16704 * When the flag is set, it means that GPIO1 is used for eeprom
16705 * write protect and also implies that it is a LOM where GPIOs
16706 * are not used to switch power.
16708 tg3_get_eeprom_hw_cfg(tp);
16710 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16711 tg3_flag_clear(tp, TSO_CAPABLE);
16712 tg3_flag_clear(tp, TSO_BUG);
16713 tp->fw_needed = NULL;
16716 if (tg3_flag(tp, ENABLE_APE)) {
16717 /* Allow reads and writes to the
16718 * APE register and memory space.
16720 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16721 PCISTATE_ALLOW_APE_SHMEM_WR |
16722 PCISTATE_ALLOW_APE_PSPACE_WR;
16723 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16726 tg3_ape_lock_init(tp);
16727 tp->ape_hb_interval =
16728 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16731 /* Set up tp->grc_local_ctrl before calling
16732 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16733 * will bring 5700's external PHY out of reset.
16734 * It is also used as eeprom write protect on LOMs.
16736 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16737 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16738 tg3_flag(tp, EEPROM_WRITE_PROT))
16739 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16740 GRC_LCLCTRL_GPIO_OUTPUT1);
16741 /* Unused GPIO3 must be driven as output on 5752 because there
16742 * are no pull-up resistors on unused GPIO pins.
16744 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16745 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16747 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16748 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16749 tg3_flag(tp, 57765_CLASS))
16750 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16752 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16754 /* Turn off the debug UART. */
16755 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16756 if (tg3_flag(tp, IS_NIC))
16757 /* Keep VMain power. */
16758 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16759 GRC_LCLCTRL_GPIO_OUTPUT0;
16762 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16763 tp->grc_local_ctrl |=
16764 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16766 /* Switch out of Vaux if it is a NIC */
16767 tg3_pwrsrc_switch_to_vmain(tp);
16769 /* Derive initial jumbo mode from MTU assigned in
16770 * ether_setup() via the alloc_etherdev() call
16772 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16773 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16775 /* Determine WakeOnLan speed to use. */
16776 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16779 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16780 tg3_flag_clear(tp, WOL_SPEED_100MB);
16782 tg3_flag_set(tp, WOL_SPEED_100MB);
16785 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16786 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16788 /* A few boards don't want Ethernet@WireSpeed phy feature */
16789 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16790 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16791 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16792 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16793 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16794 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16795 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16797 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16798 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16799 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16800 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16801 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16803 if (tg3_flag(tp, 5705_PLUS) &&
16804 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16805 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16806 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16807 !tg3_flag(tp, 57765_PLUS)) {
16808 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16809 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16810 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16811 tg3_asic_rev(tp) == ASIC_REV_5761) {
16812 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16813 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16814 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16815 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16816 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16818 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16821 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16822 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16823 tp->phy_otp = tg3_read_otp_phycfg(tp);
16824 if (tp->phy_otp == 0)
16825 tp->phy_otp = TG3_OTP_DEFAULT;
16828 if (tg3_flag(tp, CPMU_PRESENT))
16829 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16831 tp->mi_mode = MAC_MI_MODE_BASE;
16833 tp->coalesce_mode = 0;
16834 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16835 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16836 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16838 /* Set these bits to enable statistics workaround. */
16839 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16840 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16841 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16842 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16843 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16844 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16847 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16848 tg3_asic_rev(tp) == ASIC_REV_57780)
16849 tg3_flag_set(tp, USE_PHYLIB);
16851 err = tg3_mdio_init(tp);
16855 /* Initialize data/descriptor byte/word swapping. */
16856 val = tr32(GRC_MODE);
16857 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16858 tg3_asic_rev(tp) == ASIC_REV_5762)
16859 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16860 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16861 GRC_MODE_B2HRX_ENABLE |
16862 GRC_MODE_HTX2B_ENABLE |
16863 GRC_MODE_HOST_STACKUP);
16865 val &= GRC_MODE_HOST_STACKUP;
16867 tw32(GRC_MODE, val | tp->grc_mode);
16869 tg3_switch_clocks(tp);
16871 /* Clear this out for sanity. */
16872 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16874 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16875 tw32(TG3PCI_REG_BASE_ADDR, 0);
16877 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16879 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16880 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16881 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16882 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16883 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16884 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16885 void __iomem *sram_base;
16887 /* Write some dummy words into the SRAM status block
16888 * area, see if it reads back correctly. If the return
16889 * value is bad, force enable the PCIX workaround.
16891 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16893 writel(0x00000000, sram_base);
16894 writel(0x00000000, sram_base + 4);
16895 writel(0xffffffff, sram_base + 4);
16896 if (readl(sram_base) != 0x00000000)
16897 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16902 tg3_nvram_init(tp);
16904 /* If the device has an NVRAM, no need to load patch firmware */
16905 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16906 !tg3_flag(tp, NO_NVRAM))
16907 tp->fw_needed = NULL;
16909 grc_misc_cfg = tr32(GRC_MISC_CFG);
16910 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16912 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16913 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16914 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16915 tg3_flag_set(tp, IS_5788);
16917 if (!tg3_flag(tp, IS_5788) &&
16918 tg3_asic_rev(tp) != ASIC_REV_5700)
16919 tg3_flag_set(tp, TAGGED_STATUS);
16920 if (tg3_flag(tp, TAGGED_STATUS)) {
16921 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16922 HOSTCC_MODE_CLRTICK_TXBD);
16924 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16925 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16926 tp->misc_host_ctrl);
16929 /* Preserve the APE MAC_MODE bits */
16930 if (tg3_flag(tp, ENABLE_APE))
16931 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16935 if (tg3_10_100_only_device(tp, ent))
16936 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16938 err = tg3_phy_probe(tp);
16940 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16941 /* ... but do not return immediately ... */
16946 tg3_read_fw_ver(tp);
16948 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16949 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16951 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16952 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16954 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16957 /* 5700 {AX,BX} chips have a broken status block link
16958 * change bit implementation, so we must use the
16959 * status register in those cases.
16961 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16962 tg3_flag_set(tp, USE_LINKCHG_REG);
16964 tg3_flag_clear(tp, USE_LINKCHG_REG);
16966 /* The led_ctrl is set during tg3_phy_probe, here we might
16967 * have to force the link status polling mechanism based
16968 * upon subsystem IDs.
16970 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16971 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16972 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16973 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16974 tg3_flag_set(tp, USE_LINKCHG_REG);
16977 /* For all SERDES we poll the MAC status register. */
16978 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16979 tg3_flag_set(tp, POLL_SERDES);
16981 tg3_flag_clear(tp, POLL_SERDES);
16983 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16984 tg3_flag_set(tp, POLL_CPMU_LINK);
16986 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16987 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16988 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16989 tg3_flag(tp, PCIX_MODE)) {
16990 tp->rx_offset = NET_SKB_PAD;
16991 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16992 tp->rx_copy_thresh = ~(u16)0;
16996 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16997 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16998 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17000 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17002 /* Increment the rx prod index on the rx std ring by at most
17003 * 8 for these chips to workaround hw errata.
17005 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17006 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17007 tg3_asic_rev(tp) == ASIC_REV_5755)
17008 tp->rx_std_max_post = 8;
17010 if (tg3_flag(tp, ASPM_WORKAROUND))
17011 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17012 PCIE_PWR_MGMT_L1_THRESH_MSK;
/* Obtain the device's permanent MAC address into addr[] (6 bytes).
 *
 * Lookup order visible below: platform/firmware hook, SSB core, the
 * NIC SRAM MAC-address mailbox, NVRAM at a per-PCI-function offset,
 * and finally the live MAC_ADDR_0_{HIGH,LOW} registers.
 *
 * NOTE(review): this listing appears to be missing lines (the embedded
 * original line numbers are non-contiguous — early "return 0" paths,
 * mac_offset assignments and closing braces are not shown).  Comments
 * below describe only what is visible.
 */
17017 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17019 	u32 hi, lo, mac_offset;
/* 1) Let the platform/firmware (DT, ACPI, ...) supply the address first;
 * presumably returns 0 here on success — the return is not shown. */
17023 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
/* 2) On an SSB-embedded GigE core, ask the SSB layer for the address. */
17026 	if (tg3_flag(tp, IS_SSB_CORE)) {
17027 		err = ssb_gige_get_macaddr(tp->pdev, addr);
17028 		if (!err && is_valid_ether_addr(addr))
/* Pick the NVRAM offset of the MAC address for this PCI function.
 * 5704/5780-class parts are dual-MAC: DUAL_MAC_CTRL_ID says whether we
 * are the second MAC (offset adjustment not visible in this listing). */
17033 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17034 	    tg3_flag(tp, 5780_CLASS)) {
17035 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
/* Reset the NVRAM state machine under the NVRAM arbitration lock. */
17037 		if (tg3_nvram_lock(tp))
17038 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17040 		tg3_nvram_unlock(tp);
/* 5717+ parts have up to four functions; each function's MAC address
 * lives at a different NVRAM offset (0x18c stride visible below). */
17041 	} else if (tg3_flag(tp, 5717_PLUS)) {
17042 		if (tp->pci_fn & 1)
17044 		if (tp->pci_fn > 1)
17045 			mac_offset += 0x18c;
17046 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17049 	/* First try to get it from MAC address mailbox. */
17050 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is the ASCII signature "HK" the bootcode writes in the upper
 * half of the high-mailbox word when a valid address is present. */
17051 	if ((hi >> 16) == 0x484b) {
17052 		addr[0] = (hi >> 8) & 0xff;
17053 		addr[1] = (hi >> 0) & 0xff;
17055 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17056 		addr[2] = (lo >> 24) & 0xff;
17057 		addr[3] = (lo >> 16) & 0xff;
17058 		addr[4] = (lo >> 8) & 0xff;
17059 		addr[5] = (lo >> 0) & 0xff;
17061 		/* Some old bootcode may report a 0 MAC address in SRAM */
17062 		addr_ok = is_valid_ether_addr(addr);
17065 	/* Next, try NVRAM. */
17066 	if (!tg3_flag(tp, NO_NVRAM) &&
17067 	    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17068 	    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM stores the address big-endian: the two low bytes of 'hi'
 * (skip 2 pad bytes) followed by all four bytes of 'lo'. */
17069 		memcpy(&addr[0], ((char *)&hi) + 2, 2);
17070 		memcpy(&addr[2], (char *)&lo, sizeof(lo));
17072 	/* Finally just fetch it out of the MAC control regs. */
17074 		hi = tr32(MAC_ADDR_0_HIGH);
17075 		lo = tr32(MAC_ADDR_0_LOW);
17077 		addr[5] = lo & 0xff;
17078 		addr[4] = (lo >> 8) & 0xff;
17079 		addr[3] = (lo >> 16) & 0xff;
17080 		addr[2] = (lo >> 24) & 0xff;
17081 		addr[1] = hi & 0xff;
17082 		addr[0] = (hi >> 8) & 0xff;
/* Reject a still-invalid (zero/multicast) address; the error return
 * is not visible in this listing. */
17086 	if (!is_valid_ether_addr(addr))
/* DMA burst-boundary goals consumed by tg3_calc_dma_bndry(): stop bursts
 * at every cacheline, or allow them to span multiple cachelines.
 * (A value of 0 — no #define — means "no boundary restriction".) */
17091 #define BOUNDARY_SINGLE_CACHELINE	1
17092 #define BOUNDARY_MULTI_CACHELINE	2
/* Fold the appropriate DMA read/write burst-boundary bits into 'val'
 * (a DMA_RWCTRL image) based on the PCI cacheline size, the host
 * architecture, and the bus type (PCI / PCI-X / PCI Express).
 *
 * NOTE(review): lines are missing from this listing (several switch
 * case labels, the 'goal = 0' default and the final return are not
 * shown); comments describe only the visible code.
 */
17094 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17096 	int cacheline_size;
/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; 0 means unknown,
 * in which case a conservative 1024 bytes is assumed. */
17100 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17102 		cacheline_size = 1024;
17104 		cacheline_size = (int) byte * 4;
17106 	/* On 5703 and later chips, the boundary bits have no
/* Only 5700/5701 on conventional PCI honor these boundary bits;
 * everything else bails out early (the return is not shown). */
17109 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17110 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17111 	    !tg3_flag(tp, PCI_EXPRESS))
/* Per-arch policy: PPC64/PARISC PCI bridges prefer bursts confined to a
 * few cachelines; SPARC64/Alpha disconnect on any cacheline crossing. */
17114 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17115 	goal = BOUNDARY_MULTI_CACHELINE;
17117 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17118 	goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ has a single "disable cache alignment" control bit instead of
 * the boundary fields; the return after this is not shown. */
17124 	if (tg3_flag(tp, 57765_PLUS)) {
17125 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17132 	/* PCI controllers on most RISC systems tend to disconnect
17133 	 * when a device tries to burst across a cache-line boundary.
17134 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17136 	 * Unfortunately, for PCI-E there are only limited
17137 	 * write-side controls for this, and thus for reads
17138 	 * we will still get the disconnects.  We'll also waste
17139 	 * these PCI cycles for both read and write for chips
17140 	 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encodings differ from conventional PCI; the visible
 * cases select 128/256/384-byte read+write boundaries. */
17143 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17144 		switch (cacheline_size) {
17149 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17150 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17151 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17153 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17154 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17159 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17160 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17164 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17165 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only write-side boundaries exist (64 or 128 bytes). */
17168 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17169 		switch (cacheline_size) {
17173 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17174 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17175 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17181 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17182 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: boundary follows the cacheline size directly
 * (16/32/64/128 for single-cacheline goal, 256/512/1024 otherwise). */
17186 		switch (cacheline_size) {
17188 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17189 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17190 					DMA_RWCTRL_WRITE_BNDRY_16);
17195 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17196 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17197 					DMA_RWCTRL_WRITE_BNDRY_32);
17202 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17203 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17204 					DMA_RWCTRL_WRITE_BNDRY_64);
17209 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17210 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17211 					DMA_RWCTRL_WRITE_BNDRY_128);
17216 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17217 				DMA_RWCTRL_WRITE_BNDRY_256);
17220 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17221 				DMA_RWCTRL_WRITE_BNDRY_512);
17225 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17226 				DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one test DMA transfer of 'size' bytes between the host buffer
 * (buf/buf_dma) and on-chip buffer memory, using a hand-built internal
 * DMA descriptor written into NIC SRAM.  'to_device' selects the read
 * (host->chip) vs write (chip->host) DMA engine.
 *
 * NOTE(review): lines are missing from this listing (the 'ret' and 'i'
 * declarations, several if/else keywords, the GRC-reset discussion tail
 * and the final return/timeout handling are not shown).
 */
17235 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17236 			   int size, bool to_device)
17238 	struct tg3_internal_buffer_desc test_desc;
17239 	u32 sram_dma_descs;
17242 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the completion FIFOs and both DMA engines before the test. */
17244 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17245 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17246 	tw32(RDMAC_STATUS, 0);
17247 	tw32(WDMAC_STATUS, 0);
17249 	tw32(BUFMGR_MODE, 0);
17250 	tw32(FTQ_RESET, 0);
/* Build the internal descriptor: host DMA address split into hi/lo
 * words, a fixed on-chip mbuf target, and the transfer length. */
17252 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17253 	test_desc.addr_lo = buf_dma & 0xffffffff;
17254 	test_desc.nic_mbuf = 0x00002100;
17255 	test_desc.len = size;
17258 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17259 	 * the *second* time the tg3 driver was getting loaded after an
17262 	 * Broadcom tells me:
17263 	 *    ...the DMA engine is connected to the GRC block and a DMA
17264 	 *    reset may affect the GRC block in some unpredictable way...
17265 	 *    The behavior of resets to individual blocks has not been tested.
17267 	 * Broadcom noted the GRC reset will also reset all sub-components.
/* Completion-queue/send-queue ids and engine enable differ by
 * direction: (13<<8)|2 + RDMAC for host->chip, (16<<8)|7 + WDMAC for
 * chip->host.  The if/else keywords are not visible in this listing. */
17270 		test_desc.cqid_sqid = (13 << 8) | 2;
17272 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17275 		test_desc.cqid_sqid = (16 << 8) | 7;
17277 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17280 	test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the indirect
 * PCI memory window, then restore the window base to 0. */
17282 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17285 		val = *(((u32 *)&test_desc) + i);
17286 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17287 				       sram_dma_descs + (i * sizeof(u32)));
17288 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17290 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the transfer by enqueueing the descriptor on the appropriate
 * high-priority DMA FIFO (read vs write engine). */
17293 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17295 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll up to 40 times for the descriptor to show up on the completion
 * FIFO; timeout/return handling is not visible in this listing. */
17298 	for (i = 0; i < 40; i++) {
17302 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17304 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17305 		if ((val & 0xffff) == sram_dma_descs) {
/* Size of the coherent buffer used by tg3_test_dma() (8 KiB). */
17316 #define TEST_BUFFER_SIZE	0x2000
/* Host bridges known to expose the 5700/5701 write-DMA bug even when the
 * DMA self-test passes; tg3_test_dma() forces a 16-byte write boundary
 * when one is present.  (Table terminator not visible in this listing.) */
17318 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17319 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Determine a safe DMA_RWCTRL value for this chip/bus combination and,
 * on 5700/5701, run a write-then-read DMA loopback self-test to detect
 * the write-DMA corruption bug, tightening the write boundary to 16
 * bytes if corruption is observed.
 *
 * NOTE(review): lines are missing from this listing (declarations of
 * 'i'/'ret'/'p', allocation failure check, 'goto out'/labels, retry
 * loop braces and the final return); comments describe only the
 * visible code.  The hex watermark constants below are chip-specific
 * magic values; their bit meanings are not derivable from this file.
 */
17323 static int tg3_test_dma(struct tg3 *tp)
17325 	dma_addr_t buf_dma;
17326 	u32 *buf, saved_dma_rwctrl;
17329 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17330 				 &buf_dma, GFP_KERNEL);
/* Baseline: PCI write command 0x7, read command 0x6, then fold in the
 * bus-appropriate burst boundary bits. */
17336 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17337 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17339 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* 57765+ skips the watermark tuning below (jump target not shown). */
17341 	if (tg3_flag(tp, 57765_PLUS))
17344 	if (tg3_flag(tp, PCI_EXPRESS)) {
17345 		/* DMA read watermark not used on PCIE */
17346 		tp->dma_rwctrl |= 0x00180000;
17347 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17348 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17349 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17350 			tp->dma_rwctrl |= 0x003f0000;
17352 			tp->dma_rwctrl |= 0x003f000f;
/* PCI-X 5703/5704: tune the read watermark by core clock setting. */
17354 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17355 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17356 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17357 			u32 read_water = 0x7;
17359 			/* If the 5704 is behind the EPB bridge, we can
17360 			 * do the less restrictive ONE_DMA workaround for
17361 			 * better performance.
17363 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17364 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17365 				tp->dma_rwctrl |= 0x8000;
17366 			else if (ccval == 0x6 || ccval == 0x7)
17367 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17369 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17371 			/* Set bit 23 to enable PCIX hw bug fix */
17373 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17374 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17376 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17377 			/* 5780 always in PCIX mode */
17378 			tp->dma_rwctrl |= 0x00144000;
17379 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17380 			/* 5714 always in PCIX mode */
17381 			tp->dma_rwctrl |= 0x00148000;
17383 			tp->dma_rwctrl |= 0x001b000f;
/* SSB cores may require serializing DMA (one transaction at a time). */
17386 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17387 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
/* 5703/5704: clear the low nibble (boundary bits repurposed there). */
17389 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17390 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17391 		tp->dma_rwctrl &= 0xfffffff0;
17393 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17394 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17395 		/* Remove this if it causes problems for some boards. */
17396 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17398 		/* On 5700/5701 chips, we need to set this bit.
17399 		 * Otherwise the chip will issue cacheline transactions
17400 		 * to streamable DMA memory with not all the byte
17401 		 * enables turned on.  This is an error on several
17402 		 * RISC PCI controllers, in particular sparc64.
17404 		 * On 5703/5704 chips, this bit has been reassigned
17405 		 * a different meaning.  In particular, it is used
17406 		 * on those chips to enable a PCI-X workaround.
17408 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17411 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Only 5700/5701 need the actual loopback self-test below. */
17414 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17415 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17418 	/* It is best to perform DMA test with maximum write burst size
17419 	 * to expose the 5700/5701 write DMA bug.
17421 	saved_dma_rwctrl = tp->dma_rwctrl;
17422 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17423 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern (pattern write not shown),
 * DMA it to the chip, then DMA it back and compare. */
17428 	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17431 	/* Send the buffer to the chip. */
17432 	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17434 		dev_err(&tp->pdev->dev,
17435 			"%s: Buffer write failed. err = %d\n",
17440 	/* Now read it back. */
17441 	ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17443 		dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17444 			"err = %d\n", __func__, ret);
17449 	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
/* On a mismatch: if we have not yet dropped to a 16-byte write
 * boundary, do so and retry; otherwise report corruption. */
17453 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17454 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17455 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17456 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17457 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17460 				dev_err(&tp->pdev->dev,
17461 					"%s: Buffer corrupted on read back! "
17462 					"(%d != %d)\n", __func__, p[i], i);
/* Loop ran to completion => every word matched => test passed. */
17468 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17474 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17475 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17476 		/* DMA test passed without adjusting DMA boundary,
17477 		 * now look for chipsets that are known to expose the
17478 		 * DMA bug without failing the test.
17480 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17481 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17482 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17484 			/* Safe to use the calculated DMA boundary. */
17485 			tp->dma_rwctrl = saved_dma_rwctrl;
17488 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17492 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Populate tp->bufmgr_config with the mbuf/DMA watermark defaults for
 * this chip generation, for both standard and jumbo frames.  Three
 * tiers are visible: 57765+, 5705+, and (in a branch whose else-line is
 * not shown) older chips using the base defaults.
 *
 * NOTE(review): braces and the 5780-class else-if line are missing
 * from this listing; grouping below follows the embedded line numbers.
 */
17497 static void tg3_init_bufmgr_config(struct tg3 *tp)
17499 	if (tg3_flag(tp, 57765_PLUS)) {
17500 		tp->bufmgr_config.mbuf_read_dma_low_water =
17501 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17502 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17503 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17504 		tp->bufmgr_config.mbuf_high_water =
17505 			DEFAULT_MB_HIGH_WATER_57765;
17507 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17508 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17509 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17510 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17511 		tp->bufmgr_config.mbuf_high_water_jumbo =
17512 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17513 	} else if (tg3_flag(tp, 5705_PLUS)) {
17514 		tp->bufmgr_config.mbuf_read_dma_low_water =
17515 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17516 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17517 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17518 		tp->bufmgr_config.mbuf_high_water =
17519 			DEFAULT_MB_HIGH_WATER_5705;
/* 5906 overrides two of the 5705 values with its own smaller limits. */
17520 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17521 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17522 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17523 			tp->bufmgr_config.mbuf_high_water =
17524 				DEFAULT_MB_HIGH_WATER_5906;
/* Jumbo watermarks here use the 5780-family constants; the enclosing
 * branch line is not visible in this listing. */
17527 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17528 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17529 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17530 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17531 		tp->bufmgr_config.mbuf_high_water_jumbo =
17532 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Fallback tier: base defaults for the oldest chips. */
17534 		tp->bufmgr_config.mbuf_read_dma_low_water =
17535 			DEFAULT_MB_RDMA_LOW_WATER;
17536 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17537 			DEFAULT_MB_MACRX_LOW_WATER;
17538 		tp->bufmgr_config.mbuf_high_water =
17539 			DEFAULT_MB_HIGH_WATER;
17541 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17542 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17543 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17544 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17545 		tp->bufmgr_config.mbuf_high_water_jumbo =
17546 			DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA descriptor watermarks are the same for all generations. */
17549 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17550 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the PHY id (masked with TG3_PHY_ID_MASK) to a human-readable
 * model string for the probe banner.  An id of 0 denotes a serdes-only
 * device; unrecognized ids report "unknown". */
17553 static char *tg3_phy_string(struct tg3 *tp)
17555 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17556 	case TG3_PHY_ID_BCM5400:	return "5400";
17557 	case TG3_PHY_ID_BCM5401:	return "5401";
17558 	case TG3_PHY_ID_BCM5411:	return "5411";
17559 	case TG3_PHY_ID_BCM5701:	return "5701";
17560 	case TG3_PHY_ID_BCM5703:	return "5703";
17561 	case TG3_PHY_ID_BCM5704:	return "5704";
17562 	case TG3_PHY_ID_BCM5705:	return "5705";
17563 	case TG3_PHY_ID_BCM5750:	return "5750";
17564 	case TG3_PHY_ID_BCM5752:	return "5752";
17565 	case TG3_PHY_ID_BCM5714:	return "5714";
17566 	case TG3_PHY_ID_BCM5780:	return "5780";
17567 	case TG3_PHY_ID_BCM5755:	return "5755";
17568 	case TG3_PHY_ID_BCM5787:	return "5787";
17569 	case TG3_PHY_ID_BCM5784:	return "5784";
17570 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17571 	case TG3_PHY_ID_BCM5906:	return "5906";
17572 	case TG3_PHY_ID_BCM5761:	return "5761";
17573 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17574 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17575 	case TG3_PHY_ID_BCM57765:	return "57765";
17576 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17577 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17578 	case TG3_PHY_ID_BCM5762:	return "5762C";
17579 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17580 	case 0:			return "serdes";
17581 	default:		return "unknown";
/* Format a description of the bus type/speed/width into caller-supplied
 * 'str': "PCI Express", "PCIX:<speed>", or "PCI:<speed>:<width>".
 * PCI-X speed is decoded from the low 5 bits of TG3PCI_CLOCK_CTRL
 * (with a 5704CIOBE board-id special case for 133MHz).
 *
 * NOTE(review): the 'return str' lines and some braces are not visible
 * in this listing. */
17585 static char *tg3_bus_string(struct tg3 *tp, char *str)
17587 	if (tg3_flag(tp, PCI_EXPRESS)) {
17588 		strcpy(str, "PCI Express");
17590 	} else if (tg3_flag(tp, PCIX_MODE)) {
17591 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17593 		strcpy(str, "PCIX:");
17595 		if ((clock_ctrl == 7) ||
17596 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17597 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17598 			strcat(str, "133MHz");
17599 		else if (clock_ctrl == 0)
17600 			strcat(str, "33MHz");
17601 		else if (clock_ctrl == 2)
17602 			strcat(str, "50MHz");
17603 		else if (clock_ctrl == 4)
17604 			strcat(str, "66MHz");
17605 		else if (clock_ctrl == 6)
17606 			strcat(str, "100MHz");
/* Conventional PCI: speed from the PCI_HIGH_SPEED flag, then width. */
17608 		strcpy(str, "PCI:");
17609 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17610 			strcat(str, "66MHz");
17612 			strcat(str, "33MHz");
17614 	if (tg3_flag(tp, PCI_32BIT))
17615 		strcat(str, ":32-bit");
17617 		strcat(str, ":64-bit");
/* Initialize tp->coal (the ethtool interrupt-coalescing parameters)
 * with the driver defaults, then adjust for two hardware quirks:
 * chips using CLRTICK modes need different tick values, and 5705+
 * chips do not support the per-IRQ / statistics coalescing knobs. */
17621 static void tg3_init_coal(struct tg3 *tp)
17623 	struct ethtool_coalesce *ec = &tp->coal;
17625 	memset(ec, 0, sizeof(*ec));
17626 	ec->cmd = ETHTOOL_GCOALESCE;
17627 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17628 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17629 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17630 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17631 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17632 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17633 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17634 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17635 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK variants (set when TAGGED_STATUS is in use) need the
 * _CLRTCKS tick defaults instead. */
17637 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17638 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17639 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17640 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17641 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17642 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ hardware lacks these controls; zero them so ethtool reports
 * them as unsupported. */
17645 	if (tg3_flag(tp, 5705_PLUS)) {
17646 		ec->rx_coalesce_usecs_irq = 0;
17647 		ec->tx_coalesce_usecs_irq = 0;
17648 		ec->stats_block_coalesce_usecs = 0;
17652 static int tg3_init_one(struct pci_dev *pdev,
17653 const struct pci_device_id *ent)
17655 struct net_device *dev;
17658 u32 sndmbx, rcvmbx, intmbx;
17660 u64 dma_mask, persist_dma_mask;
17661 netdev_features_t features = 0;
17662 u8 addr[ETH_ALEN] __aligned(2);
17664 err = pci_enable_device(pdev);
17666 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17670 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17672 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17673 goto err_out_disable_pdev;
17676 pci_set_master(pdev);
17678 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17681 goto err_out_free_res;
17684 SET_NETDEV_DEV(dev, &pdev->dev);
17686 tp = netdev_priv(dev);
17689 tp->rx_mode = TG3_DEF_RX_MODE;
17690 tp->tx_mode = TG3_DEF_TX_MODE;
17692 tp->pcierr_recovery = false;
17695 tp->msg_enable = tg3_debug;
17697 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17699 if (pdev_is_ssb_gige_core(pdev)) {
17700 tg3_flag_set(tp, IS_SSB_CORE);
17701 if (ssb_gige_must_flush_posted_writes(pdev))
17702 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17703 if (ssb_gige_one_dma_at_once(pdev))
17704 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17705 if (ssb_gige_have_roboswitch(pdev)) {
17706 tg3_flag_set(tp, USE_PHYLIB);
17707 tg3_flag_set(tp, ROBOSWITCH);
17709 if (ssb_gige_is_rgmii(pdev))
17710 tg3_flag_set(tp, RGMII_MODE);
17713 /* The word/byte swap controls here control register access byte
17714 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17717 tp->misc_host_ctrl =
17718 MISC_HOST_CTRL_MASK_PCI_INT |
17719 MISC_HOST_CTRL_WORD_SWAP |
17720 MISC_HOST_CTRL_INDIR_ACCESS |
17721 MISC_HOST_CTRL_PCISTATE_RW;
17723 /* The NONFRM (non-frame) byte/word swap controls take effect
17724 * on descriptor entries, anything which isn't packet data.
17726 * The StrongARM chips on the board (one for tx, one for rx)
17727 * are running in big-endian mode.
17729 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17730 GRC_MODE_WSWAP_NONFRM_DATA);
17731 #ifdef __BIG_ENDIAN
17732 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17734 spin_lock_init(&tp->lock);
17735 spin_lock_init(&tp->indirect_lock);
17736 INIT_WORK(&tp->reset_task, tg3_reset_task);
17738 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17740 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17742 goto err_out_free_dev;
17745 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17746 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17747 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17748 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17749 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17750 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17751 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17754 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17760 tg3_flag_set(tp, ENABLE_APE);
17761 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17762 if (!tp->aperegs) {
17763 dev_err(&pdev->dev,
17764 "Cannot map APE registers, aborting\n");
17766 goto err_out_iounmap;
17770 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17771 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17773 dev->ethtool_ops = &tg3_ethtool_ops;
17774 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17775 dev->netdev_ops = &tg3_netdev_ops;
17776 dev->irq = pdev->irq;
17778 err = tg3_get_invariants(tp, ent);
17780 dev_err(&pdev->dev,
17781 "Problem fetching invariants of chip, aborting\n");
17782 goto err_out_apeunmap;
17785 /* The EPB bridge inside 5714, 5715, and 5780 and any
17786 * device behind the EPB cannot support DMA addresses > 40-bit.
17787 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17788 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17789 * do DMA address check in __tg3_start_xmit().
17791 if (tg3_flag(tp, IS_5788))
17792 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17793 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17794 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17795 #ifdef CONFIG_HIGHMEM
17796 dma_mask = DMA_BIT_MASK(64);
17799 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17801 /* Configure DMA attributes. */
17802 if (dma_mask > DMA_BIT_MASK(32)) {
17803 err = dma_set_mask(&pdev->dev, dma_mask);
17805 features |= NETIF_F_HIGHDMA;
17806 err = dma_set_coherent_mask(&pdev->dev,
17809 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17810 "DMA for consistent allocations\n");
17811 goto err_out_apeunmap;
17815 if (err || dma_mask == DMA_BIT_MASK(32)) {
17816 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17818 dev_err(&pdev->dev,
17819 "No usable DMA configuration, aborting\n");
17820 goto err_out_apeunmap;
17824 tg3_init_bufmgr_config(tp);
17826 /* 5700 B0 chips do not support checksumming correctly due
17827 * to hardware bugs.
17829 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17830 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17832 if (tg3_flag(tp, 5755_PLUS))
17833 features |= NETIF_F_IPV6_CSUM;
17836 /* TSO is on by default on chips that support hardware TSO.
17837 * Firmware TSO on older chips gives lower performance, so it
17838 * is off by default, but can be enabled using ethtool.
17840 if ((tg3_flag(tp, HW_TSO_1) ||
17841 tg3_flag(tp, HW_TSO_2) ||
17842 tg3_flag(tp, HW_TSO_3)) &&
17843 (features & NETIF_F_IP_CSUM))
17844 features |= NETIF_F_TSO;
17845 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17846 if (features & NETIF_F_IPV6_CSUM)
17847 features |= NETIF_F_TSO6;
17848 if (tg3_flag(tp, HW_TSO_3) ||
17849 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17850 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17851 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17852 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17853 tg3_asic_rev(tp) == ASIC_REV_57780)
17854 features |= NETIF_F_TSO_ECN;
17857 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17858 NETIF_F_HW_VLAN_CTAG_RX;
17859 dev->vlan_features |= features;
17862 * Add loopback capability only for a subset of devices that support
17863 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17864 * loopback for the remaining devices.
17866 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17867 !tg3_flag(tp, CPMU_PRESENT))
17868 /* Add the loopback capability */
17869 features |= NETIF_F_LOOPBACK;
17871 dev->hw_features |= features;
17872 dev->priv_flags |= IFF_UNICAST_FLT;
17874 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17875 dev->min_mtu = TG3_MIN_MTU;
17876 dev->max_mtu = TG3_MAX_MTU(tp);
17878 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17879 !tg3_flag(tp, TSO_CAPABLE) &&
17880 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17881 tg3_flag_set(tp, MAX_RXPEND_64);
17882 tp->rx_pending = 63;
17885 err = tg3_get_device_address(tp, addr);
17887 dev_err(&pdev->dev,
17888 "Could not obtain valid ethernet address, aborting\n");
17889 goto err_out_apeunmap;
17891 eth_hw_addr_set(dev, addr);
17893 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17894 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17895 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17896 for (i = 0; i < tp->irq_max; i++) {
17897 struct tg3_napi *tnapi = &tp->napi[i];
17900 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17902 tnapi->int_mbox = intmbx;
17905 tnapi->consmbox = rcvmbx;
17906 tnapi->prodmbox = sndmbx;
17909 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17911 tnapi->coal_now = HOSTCC_MODE_NOW;
17913 if (!tg3_flag(tp, SUPPORT_MSIX))
17917 * If we support MSIX, we'll be using RSS. If we're using
17918 * RSS, the first vector only handles link interrupts and the
17919 * remaining vectors handle rx and tx interrupts. Reuse the
17920 * mailbox values for the next iteration. The values we setup
17921 * above are still useful for the single vectored mode.
17935 * Reset chip in case an UNDI or EFI driver did not shut it down cleanly;
17936 * otherwise the DMA self test will enable WDMAC and we'll see (spurious)
17937 * pending DMA on the PCI bus at that point.
17939 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17940 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17941 tg3_full_lock(tp, 0);
17942 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17943 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17944 tg3_full_unlock(tp);
17947 err = tg3_test_dma(tp);
17949 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17950 goto err_out_apeunmap;
17955 pci_set_drvdata(pdev, dev);
17957 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17958 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17959 tg3_asic_rev(tp) == ASIC_REV_5762)
17960 tg3_flag_set(tp, PTP_CAPABLE);
17962 tg3_timer_init(tp);
17964 tg3_carrier_off(tp);
17966 err = register_netdev(dev);
17968 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17969 goto err_out_apeunmap;
17972 if (tg3_flag(tp, PTP_CAPABLE)) {
17974 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17976 if (IS_ERR(tp->ptp_clock))
17977 tp->ptp_clock = NULL;
17980 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17981 tp->board_part_number,
17982 tg3_chip_rev_id(tp),
17983 tg3_bus_string(tp, str),
17986 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17989 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17990 ethtype = "10/100Base-TX";
17991 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17992 ethtype = "1000Base-SX";
17994 ethtype = "10/100/1000Base-T";
17996 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17997 "(WireSpeed[%d], EEE[%d])\n",
17998 tg3_phy_string(tp), ethtype,
17999 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18000 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18003 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18004 (dev->features & NETIF_F_RXCSUM) != 0,
18005 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18006 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18007 tg3_flag(tp, ENABLE_ASF) != 0,
18008 tg3_flag(tp, TSO_CAPABLE) != 0);
18009 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18011 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18012 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18014 pci_save_state(pdev);
18020 iounmap(tp->aperegs);
18021 tp->aperegs = NULL;
18034 pci_release_regions(pdev);
18036 err_out_disable_pdev:
18037 if (pci_is_enabled(pdev))
18038 pci_disable_device(pdev);
/* tg3_remove_one() - PCI .remove callback; undoes the setup done by the
 * probe routine when the device is unbound from the driver.
 * NOTE(review): several lines of this function are elided in this extract
 * (the embedded original line numbers jump), so some branches and closing
 * braces are not visible below.
 */
18042 static void tg3_remove_one(struct pci_dev *pdev)
18044 struct net_device *dev = pci_get_drvdata(pdev);
18047 struct tg3 *tp = netdev_priv(dev);
/* Drop the reference to any firmware image requested earlier. */
18051 release_firmware(tp->fw);
/* Ensure the deferred reset task is cancelled and cannot run again. */
18053 tg3_reset_task_cancel(tp);
/* phylib-attached devices need extra teardown (body elided here). */
18055 if (tg3_flag(tp, USE_PHYLIB)) {
18060 unregister_netdev(dev);
/* Unmap the APE register window and clear the pointer so no later
 * teardown path touches the stale mapping.
 */
18062 iounmap(tp->aperegs);
18063 tp->aperegs = NULL;
18070 pci_release_regions(pdev);
18071 pci_disable_device(pdev);
18075 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend() - dev_pm_ops system-sleep suspend handler.
 * Quiesces the interface, halts the chip and prepares it for low power;
 * if the power-down preparation fails, the hardware is restarted so the
 * device remains usable.
 * NOTE(review): this extract elides several lines (embedded original line
 * numbers jump), so some guards/braces are not visible here.
 */
18076 static int tg3_suspend(struct device *device)
18078 struct net_device *dev = dev_get_drvdata(device);
18079 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never brought up. */
18084 if (!netif_running(dev))
/* Make sure the deferred reset task cannot race with suspend. */
18087 tg3_reset_task_cancel(tp);
18089 tg3_netif_stop(tp);
18091 tg3_timer_stop(tp);
/* Second argument is presumably an irq-sync flag — TODO confirm against
 * tg3_full_lock()'s definition (not visible in this extract).
 */
18093 tg3_full_lock(tp, 1);
18094 tg3_disable_ints(tp);
18095 tg3_full_unlock(tp);
18097 netif_device_detach(dev);
18099 tg3_full_lock(tp, 0);
/* Halt the chip and clear INIT_COMPLETE so later paths know the
 * hardware must be fully re-initialized.
 */
18100 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18101 tg3_flag_clear(tp, INIT_COMPLETE);
18102 tg3_full_unlock(tp);
18104 err = tg3_power_down_prepare(tp);
/* Recovery path (guard elided): if preparing for power-down failed,
 * bring the hardware and the timer/netif machinery back up.
 */
18108 tg3_full_lock(tp, 0);
18110 tg3_flag_set(tp, INIT_COMPLETE);
18111 err2 = tg3_restart_hw(tp, true);
18115 tg3_timer_start(tp);
18117 netif_device_attach(dev);
18118 tg3_netif_start(tp);
18121 tg3_full_unlock(tp);
/* tg3_resume() - dev_pm_ops system-sleep resume handler.
 * Re-attaches the netdev and restarts the hardware if the interface was
 * running when the system was suspended.
 */
18132 static int tg3_resume(struct device *device)
18134 struct net_device *dev = dev_get_drvdata(device);
18135 struct tg3 *tp = netdev_priv(dev);
/* Interface was down at suspend time: no hardware state to restore. */
18140 if (!netif_running(dev))
18143 netif_device_attach(dev);
18145 tg3_full_lock(tp, 0);
/* Inform the APE management firmware that the driver is initializing. */
18147 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18149 tg3_flag_set(tp, INIT_COMPLETE);
/* Second argument presumably requests a PHY reset — skipped when the
 * link was kept up across power-down (TG3_PHYFLG_KEEP_LINK_ON_PWRDN).
 */
18150 err = tg3_restart_hw(tp,
18151 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18155 tg3_timer_start(tp);
18157 tg3_netif_start(tp);
18160 tg3_full_unlock(tp);
18169 #endif /* CONFIG_PM_SLEEP */
/* System-sleep PM ops: wires tg3_suspend/tg3_resume into the suspend,
 * freeze, poweroff and resume, thaw, restore callbacks respectively
 * (no-ops when CONFIG_PM_SLEEP is not set).
 */
18171 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
/* tg3_shutdown() - PCI .shutdown callback, invoked on reboot/poweroff.
 * Stops driver activity and, only on a real power-off, puts the chip
 * into its low-power state before disabling the PCI device.
 */
18173 static void tg3_shutdown(struct pci_dev *pdev)
18175 struct net_device *dev = pci_get_drvdata(pdev);
18176 struct tg3 *tp = netdev_priv(dev);
/* Prevent the deferred reset task from touching the hardware now. */
18178 tg3_reset_task_cancel(tp);
18182 netif_device_detach(dev);
/* Running interface needs an explicit stop first (body elided here). */
18184 if (netif_running(dev))
/* Full power-down only when the machine is powering off, not rebooting. */
18187 if (system_state == SYSTEM_POWER_OFF)
18188 tg3_power_down(tp);
18192 pci_disable_device(pdev);
18196 * tg3_io_error_detected - called when PCI error is detected
18197 * @pdev: Pointer to PCI device
18198 * @state: The current pci connection state
18200 * This function is called after a PCI bus error affecting
18201 * this device has been detected.
/* PCI AER .error_detected callback: quiesce the driver after a bus error
 * and tell the core what to do next.  Defaults to
 * PCI_ERS_RESULT_NEED_RESET; returns PCI_ERS_RESULT_DISCONNECT for a
 * permanent failure.
 * NOTE(review): several lines are elided in this extract (embedded
 * original line numbers jump).
 */
18203 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18204 pci_channel_state_t state)
18206 struct net_device *netdev = pci_get_drvdata(pdev);
18207 struct tg3 *tp = netdev_priv(netdev);
18208 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18210 netdev_info(netdev, "PCI I/O error detected\n");
18212 /* Want to make sure that the reset task doesn't run */
18213 tg3_reset_task_cancel(tp);
18217 /* Could be second call or maybe we don't have netdev yet */
18218 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18221 /* We needn't recover from permanent error */
18222 if (state == pci_channel_io_frozen)
/* Mark recovery in progress so a re-entrant call bails out above. */
18223 tp->pcierr_recovery = true;
18227 tg3_netif_stop(tp);
18229 tg3_timer_stop(tp);
18231 netif_device_detach(netdev);
18233 /* Clean up software state, even if MMIO is blocked */
18234 tg3_full_lock(tp, 0);
18235 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18236 tg3_full_unlock(tp);
/* Permanent failure: no slot reset will follow — re-enable NAPI
 * (presumably so teardown can proceed cleanly; confirm against the
 * elided lines), report DISCONNECT and disable the PCI device.
 */
18239 if (state == pci_channel_io_perm_failure) {
18241 tg3_napi_enable(tp);
18244 err = PCI_ERS_RESULT_DISCONNECT;
18246 pci_disable_device(pdev);
18255 * tg3_io_slot_reset - called after the pci bus has been reset.
18256 * @pdev: Pointer to PCI device
18258 * Restart the card from scratch, as if from a cold-boot.
18259 * At this point, the card has experienced a hard reset,
18260 * followed by fixups by BIOS, and has its config space
18261 * set up identically to what it was at cold boot.
/* PCI AER .slot_reset callback: re-enable and re-program the device
 * after the slot has been reset.  Returns PCI_ERS_RESULT_RECOVERED on
 * success, PCI_ERS_RESULT_DISCONNECT otherwise.
 * NOTE(review): error-path braces/returns are elided in this extract.
 */
18263 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18265 struct net_device *netdev = pci_get_drvdata(pdev);
18266 struct tg3 *tp = netdev_priv(netdev);
/* Pessimistic default until recovery succeeds below. */
18267 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18272 if (pci_enable_device(pdev)) {
18273 dev_err(&pdev->dev,
18274 "Cannot re-enable PCI device after reset.\n");
18278 pci_set_master(pdev);
/* Restore the previously saved config space, then re-save it so any
 * later reset starts from this restored state.
 */
18279 pci_restore_state(pdev);
18280 pci_save_state(pdev);
/* Interface not up: nothing further to re-initialize — recovered. */
18282 if (!netdev || !netif_running(netdev)) {
18283 rc = PCI_ERS_RESULT_RECOVERED;
18287 err = tg3_power_up(tp);
18291 rc = PCI_ERS_RESULT_RECOVERED;
/* Recovery failed while the netdev was running: re-enable NAPI on the
 * (elided) cleanup path before returning DISCONNECT.
 */
18294 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18295 tg3_napi_enable(tp);
18304 * tg3_io_resume - called when traffic can start flowing again.
18305 * @pdev: Pointer to PCI device
18307 * This callback is called when the error recovery driver tells
18308 * us that it's OK to resume normal operation.
/* PCI AER .resume callback: restart the hardware and resume traffic
 * once the error-recovery core says normal operation may continue.
 */
18310 static void tg3_io_resume(struct pci_dev *pdev)
18312 struct net_device *netdev = pci_get_drvdata(pdev);
18313 struct tg3 *tp = netdev_priv(netdev);
18318 if (!netdev || !netif_running(netdev))
18321 tg3_full_lock(tp, 0);
/* Inform the APE management firmware that the driver is initializing. */
18322 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18323 tg3_flag_set(tp, INIT_COMPLETE);
/* Second argument presumably requests a PHY reset — TODO confirm. */
18324 err = tg3_restart_hw(tp, true);
/* Restart failed (guard elided): drop the lock, log, and bail out. */
18326 tg3_full_unlock(tp);
18327 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18331 netif_device_attach(netdev);
18333 tg3_timer_start(tp);
18335 tg3_netif_start(tp);
18337 tg3_full_unlock(tp);
/* Recovery finished — let future error_detected calls proceed again. */
18342 tp->pcierr_recovery = false;
/* Hooks into the PCI Advanced Error Reporting recovery flow:
 * error_detected -> slot_reset -> resume.
 */
18346 static const struct pci_error_handlers tg3_err_handler = {
18347 .error_detected = tg3_io_error_detected,
18348 .slot_reset = tg3_io_slot_reset,
18349 .resume = tg3_io_resume
/* PCI driver glue: probe/remove, PM ops, AER error handlers, shutdown. */
18352 static struct pci_driver tg3_driver = {
18353 .name = DRV_MODULE_NAME,
18354 .id_table = tg3_pci_tbl,
18355 .probe = tg3_init_one,
18356 .remove = tg3_remove_one,
18357 .err_handler = &tg3_err_handler,
18358 .driver.pm = &tg3_pm_ops,
18359 .shutdown = tg3_shutdown,
/* Expands to module init/exit that register/unregister tg3_driver. */
18362 module_pci_driver(tg3_driver);