2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2014 Broadcom Corporation.
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/stringify.h>
15 #include <linux/kernel.h>
16 #include <linux/sched/signal.h>
17 #include <linux/types.h>
18 #include <linux/compiler.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mdio.h>
30 #include <linux/mii.h>
31 #include <linux/phy.h>
32 #include <linux/brcmphy.h>
34 #include <linux/if_vlan.h>
36 #include <linux/tcp.h>
37 #include <linux/workqueue.h>
38 #include <linux/prefetch.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/firmware.h>
41 #include <linux/ssb/ssb_driver_gige.h>
42 #include <linux/hwmon.h>
43 #include <linux/hwmon-sysfs.h>
45 #include <net/checksum.h>
49 #include <asm/byteorder.h>
50 #include <linux/uaccess.h>
52 #include <uapi/linux/net_tstamp.h>
53 #include <linux/ptp_clock_kernel.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 137
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "May 11, 2014"
96 #define RESET_KIND_SHUTDOWN 0
97 #define RESET_KIND_INIT 1
98 #define RESET_KIND_SUSPEND 2
100 #define TG3_DEF_RX_MODE 0
101 #define TG3_DEF_TX_MODE 0
102 #define TG3_DEF_MSG_ENABLE \
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
114 /* length of time before we decide the hardware is borked,
115 * and dev->tx_timeout() should be called to fix the problem
118 #define TG3_TX_TIMEOUT (5 * HZ)
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU ETH_ZLEN
122 #define TG3_MAX_MTU(tp) \
123 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126 * You can't change the ring sizes, but you can change where you place
127 * them in the NIC onboard memory.
129 #define TG3_RX_STD_RING_SIZE(tp) \
130 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING 200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 /* Do not place this n-ring entries value into the tp struct itself,
139 * we really want to expose these constants to GCC so that modulo et
140 * al. operations are done with shifts and masks instead of with
141 * hw multiply/modulo instructions. Another solution would be to
142 * replace things like '% foo' with '& (foo - 1)'.
145 #define TG3_TX_RING_SIZE 512
146 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
148 #define TG3_RX_STD_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
156 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158 #define TG3_DMA_BYTE_ENAB 64
160 #define TG3_RX_STD_DMA_SZ 1536
161 #define TG3_RX_JMB_DMA_SZ 9046
163 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
165 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175 * that are at least dword aligned when used in PCIX mode. The driver
176 * works around this bug by double copying the packet. This workaround
177 * is built into the normal double copy length check for efficiency.
179 * However, the double copy is only necessary on those architectures
180 * where unaligned memory accesses are inefficient. For those architectures
181 * where unaligned memory accesses incur little penalty, we can reintegrate
182 * the 5701 in the normal rx path. Doing so saves a device structure
183 * dereference by hardcoding the double copy threshold in place.
185 #define TG3_RX_COPY_THRESHOLD 256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
189 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
195 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K 2048
201 #define TG3_TX_BD_DMA_MAX_4K 4096
203 #define TG3_RAW_IP_ALIGN 2
205 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
206 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
208 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
209 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
211 #define FIRMWARE_TG3 "/*(DEBLOBBED)*/"
212 #define FIRMWARE_TG357766 "/*(DEBLOBBED)*/"
213 #define FIRMWARE_TG3TSO "/*(DEBLOBBED)*/"
214 #define FIRMWARE_TG3TSO5 "/*(DEBLOBBED)*/"
216 static char version[] =
217 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
220 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_MODULE_VERSION);
225 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226 module_param(tg3_debug, int, 0);
227 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
229 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
230 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
232 static const struct pci_device_id tg3_pci_tbl[] = {
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
252 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
253 TG3_DRV_DATA_FLAG_5705_10_100},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
267 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
273 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
281 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
282 PCI_VENDOR_ID_LENOVO,
283 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
284 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
287 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
306 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
307 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
308 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
309 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
310 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
311 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
313 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
325 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
327 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
747 if (pci_channel_offline(tp->pdev))
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp, gnt + off, bit);
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
766 if (!tg3_flag(tp, ENABLE_APE))
770 case TG3_APE_LOCK_GPIO:
771 if (tg3_asic_rev(tp) == ASIC_REV_5761)
773 case TG3_APE_LOCK_GRC:
774 case TG3_APE_LOCK_MEM:
776 bit = APE_LOCK_GRANT_DRIVER;
778 bit = 1 << tp->pci_fn;
780 case TG3_APE_LOCK_PHY0:
781 case TG3_APE_LOCK_PHY1:
782 case TG3_APE_LOCK_PHY2:
783 case TG3_APE_LOCK_PHY3:
784 bit = APE_LOCK_GRANT_DRIVER;
790 if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 gnt = TG3_APE_LOCK_GRANT;
793 gnt = TG3_APE_PER_LOCK_GRANT;
795 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
803 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
806 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
810 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
813 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
816 return timeout_us ? 0 : -EBUSY;
819 #ifdef CONFIG_TIGON3_HWMON
820 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
824 for (i = 0; i < timeout_us / 10; i++) {
825 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
827 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
833 return i == timeout_us / 10;
836 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
840 u32 i, bufoff, msgoff, maxlen, apedata;
842 if (!tg3_flag(tp, APE_HAS_NCSI))
845 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
846 if (apedata != APE_SEG_SIG_MAGIC)
849 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
850 if (!(apedata & APE_FW_STATUS_READY))
853 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
855 msgoff = bufoff + 2 * sizeof(u32);
856 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
861 /* Cap xfer sizes to scratchpad limits. */
862 length = (len > maxlen) ? maxlen : len;
865 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
866 if (!(apedata & APE_FW_STATUS_READY))
869 /* Wait for up to 1 msec for APE to service previous event. */
870 err = tg3_ape_event_lock(tp, 1000);
874 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
875 APE_EVENT_STATUS_SCRTCHPD_READ |
876 APE_EVENT_STATUS_EVENT_PENDING;
877 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
879 tg3_ape_write32(tp, bufoff, base_off);
880 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
882 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
883 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
887 if (tg3_ape_wait_for_event(tp, 30000))
890 for (i = 0; length; i += 4, length -= 4) {
891 u32 val = tg3_ape_read32(tp, msgoff + i);
892 memcpy(data, &val, sizeof(u32));
901 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
906 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
907 if (apedata != APE_SEG_SIG_MAGIC)
910 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
911 if (!(apedata & APE_FW_STATUS_READY))
914 /* Wait for up to 1 millisecond for APE to service previous event. */
915 err = tg3_ape_event_lock(tp, 1000);
919 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
920 event | APE_EVENT_STATUS_EVENT_PENDING);
922 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
923 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
928 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
933 if (!tg3_flag(tp, ENABLE_APE))
937 case RESET_KIND_INIT:
938 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
939 APE_HOST_SEG_SIG_MAGIC);
940 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
941 APE_HOST_SEG_LEN_MAGIC);
942 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
943 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
944 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
945 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
946 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
947 APE_HOST_BEHAV_NO_PHYLOCK);
948 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
949 TG3_APE_HOST_DRVR_STATE_START);
951 event = APE_EVENT_STATUS_STATE_START;
953 case RESET_KIND_SHUTDOWN:
954 /* With the interface we are currently using,
955 * APE does not track driver state. Wiping
956 * out the HOST SEGMENT SIGNATURE forces
957 * the APE to assume OS absent status.
959 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
961 if (device_may_wakeup(&tp->pdev->dev) &&
962 tg3_flag(tp, WOL_ENABLE)) {
963 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
964 TG3_APE_HOST_WOL_SPEED_AUTO);
965 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
967 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
969 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
971 event = APE_EVENT_STATUS_STATE_UNLOAD;
977 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
979 tg3_ape_send_event(tp, event);
982 static void tg3_disable_ints(struct tg3 *tp)
986 tw32(TG3PCI_MISC_HOST_CTRL,
987 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
988 for (i = 0; i < tp->irq_max; i++)
989 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
992 static void tg3_enable_ints(struct tg3 *tp)
999 tw32(TG3PCI_MISC_HOST_CTRL,
1000 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1002 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1003 for (i = 0; i < tp->irq_cnt; i++) {
1004 struct tg3_napi *tnapi = &tp->napi[i];
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007 if (tg3_flag(tp, 1SHOT_MSI))
1008 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1010 tp->coal_now |= tnapi->coal_now;
1013 /* Force an initial interrupt */
1014 if (!tg3_flag(tp, TAGGED_STATUS) &&
1015 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1016 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1018 tw32(HOSTCC_MODE, tp->coal_now);
1020 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1023 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1025 struct tg3 *tp = tnapi->tp;
1026 struct tg3_hw_status *sblk = tnapi->hw_status;
1027 unsigned int work_exists = 0;
1029 /* check for phy events */
1030 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1031 if (sblk->status & SD_STATUS_LINK_CHG)
1035 /* check for TX work to do */
1036 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1039 /* check for RX work to do */
1040 if (tnapi->rx_rcb_prod_idx &&
1041 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1048 * similar to tg3_enable_ints, but it accurately determines whether there
1049 * is new work pending and can return without flushing the PIO write
1050 * which reenables interrupts
1052 static void tg3_int_reenable(struct tg3_napi *tnapi)
1054 struct tg3 *tp = tnapi->tp;
1056 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1059 /* When doing tagged status, this work check is unnecessary.
1060 * The last_tag we write above tells the chip which piece of
1061 * work we've completed.
1063 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1064 tw32(HOSTCC_MODE, tp->coalesce_mode |
1065 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1068 static void tg3_switch_clocks(struct tg3 *tp)
1071 u32 orig_clock_ctrl;
1073 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1076 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1078 orig_clock_ctrl = clock_ctrl;
1079 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1080 CLOCK_CTRL_CLKRUN_OENABLE |
1082 tp->pci_clock_ctrl = clock_ctrl;
1084 if (tg3_flag(tp, 5705_PLUS)) {
1085 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1086 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1087 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1089 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1090 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1092 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1094 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1095 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1098 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1101 #define PHY_BUSY_LOOPS 5000
1103 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1110 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1112 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1116 tg3_ape_lock(tp, tp->phy_ape_lock);
1120 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1121 MI_COM_PHY_ADDR_MASK);
1122 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1123 MI_COM_REG_ADDR_MASK);
1124 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1126 tw32_f(MAC_MI_COM, frame_val);
1128 loops = PHY_BUSY_LOOPS;
1129 while (loops != 0) {
1131 frame_val = tr32(MAC_MI_COM);
1133 if ((frame_val & MI_COM_BUSY) == 0) {
1135 frame_val = tr32(MAC_MI_COM);
1143 *val = frame_val & MI_COM_DATA_MASK;
1147 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1148 tw32_f(MAC_MI_MODE, tp->mi_mode);
1152 tg3_ape_unlock(tp, tp->phy_ape_lock);
1157 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1159 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1162 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1169 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1170 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1173 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1175 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1179 tg3_ape_lock(tp, tp->phy_ape_lock);
1181 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1182 MI_COM_PHY_ADDR_MASK);
1183 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1184 MI_COM_REG_ADDR_MASK);
1185 frame_val |= (val & MI_COM_DATA_MASK);
1186 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1188 tw32_f(MAC_MI_COM, frame_val);
1190 loops = PHY_BUSY_LOOPS;
1191 while (loops != 0) {
1193 frame_val = tr32(MAC_MI_COM);
1194 if ((frame_val & MI_COM_BUSY) == 0) {
1196 frame_val = tr32(MAC_MI_COM);
1206 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1207 tw32_f(MAC_MI_MODE, tp->mi_mode);
1211 tg3_ape_unlock(tp, tp->phy_ape_lock);
1216 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1218 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1221 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1225 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1229 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1233 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1234 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1238 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1244 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1248 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1252 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1256 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1257 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1261 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1267 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1271 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1273 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1278 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1282 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1289 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1293 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1294 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1295 MII_TG3_AUXCTL_SHDWSEL_MISC);
1297 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1302 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1304 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1305 set |= MII_TG3_AUXCTL_MISC_WREN;
1307 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1310 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1315 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1321 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1325 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1326 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1331 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1333 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1334 reg | val | MII_TG3_MISC_SHDW_WREN);
1337 static int tg3_bmcr_reset(struct tg3 *tp)
1342 /* OK, reset it, and poll the BMCR_RESET bit until it
1343 * clears or we time out.
1345 phy_control = BMCR_RESET;
1346 err = tg3_writephy(tp, MII_BMCR, phy_control);
1352 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1356 if ((phy_control & BMCR_RESET) == 0) {
1368 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1370 struct tg3 *tp = bp->priv;
1373 spin_lock_bh(&tp->lock);
1375 if (__tg3_readphy(tp, mii_id, reg, &val))
1378 spin_unlock_bh(&tp->lock);
1383 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1385 struct tg3 *tp = bp->priv;
1388 spin_lock_bh(&tp->lock);
1390 if (__tg3_writephy(tp, mii_id, reg, val))
1393 spin_unlock_bh(&tp->lock);
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1401 struct phy_device *phydev;
1403 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1404 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405 case PHY_ID_BCM50610:
1406 case PHY_ID_BCM50610M:
1407 val = MAC_PHYCFG2_50610_LED_MODES;
1409 case PHY_ID_BCMAC131:
1410 val = MAC_PHYCFG2_AC131_LED_MODES;
1412 case PHY_ID_RTL8211C:
1413 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1415 case PHY_ID_RTL8201E:
1416 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1422 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423 tw32(MAC_PHYCFG2, val);
1425 val = tr32(MAC_PHYCFG1);
1426 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429 tw32(MAC_PHYCFG1, val);
1434 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436 MAC_PHYCFG2_FMODE_MASK_MASK |
1437 MAC_PHYCFG2_GMODE_MASK_MASK |
1438 MAC_PHYCFG2_ACT_MASK_MASK |
1439 MAC_PHYCFG2_QUAL_MASK_MASK |
1440 MAC_PHYCFG2_INBAND_ENABLE;
1442 tw32(MAC_PHYCFG2, val);
1444 val = tr32(MAC_PHYCFG1);
1445 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1453 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455 tw32(MAC_PHYCFG1, val);
1457 val = tr32(MAC_EXT_RGMII_MODE);
1458 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459 MAC_RGMII_MODE_RX_QUALITY |
1460 MAC_RGMII_MODE_RX_ACTIVITY |
1461 MAC_RGMII_MODE_RX_ENG_DET |
1462 MAC_RGMII_MODE_TX_ENABLE |
1463 MAC_RGMII_MODE_TX_LOWPWR |
1464 MAC_RGMII_MODE_TX_RESET);
1465 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 val |= MAC_RGMII_MODE_RX_INT_B |
1468 MAC_RGMII_MODE_RX_QUALITY |
1469 MAC_RGMII_MODE_RX_ACTIVITY |
1470 MAC_RGMII_MODE_RX_ENG_DET;
1471 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 val |= MAC_RGMII_MODE_TX_ENABLE |
1473 MAC_RGMII_MODE_TX_LOWPWR |
1474 MAC_RGMII_MODE_TX_RESET;
1476 tw32(MAC_EXT_RGMII_MODE, val);
1479 static void tg3_mdio_start(struct tg3 *tp)
1481 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482 tw32_f(MAC_MI_MODE, tp->mi_mode);
1485 if (tg3_flag(tp, MDIOBUS_INITED) &&
1486 tg3_asic_rev(tp) == ASIC_REV_5785)
1487 tg3_mdio_config_5785(tp);
1490 static int tg3_mdio_init(struct tg3 *tp)
1494 struct phy_device *phydev;
1496 if (tg3_flag(tp, 5717_PLUS)) {
1499 tp->phy_addr = tp->pci_fn + 1;
1501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1504 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505 TG3_CPMU_PHY_STRAP_IS_SERDES;
1508 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1511 addr = ssb_gige_get_phyaddr(tp->pdev);
1514 tp->phy_addr = addr;
1516 tp->phy_addr = TG3_PHY_MII_ADDR;
1520 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1523 tp->mdio_bus = mdiobus_alloc();
1524 if (tp->mdio_bus == NULL)
1527 tp->mdio_bus->name = "tg3 mdio bus";
1528 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1529 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1530 tp->mdio_bus->priv = tp;
1531 tp->mdio_bus->parent = &tp->pdev->dev;
1532 tp->mdio_bus->read = &tg3_mdio_read;
1533 tp->mdio_bus->write = &tg3_mdio_write;
1534 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1536 /* The bus registration will look for all the PHYs on the mdio bus.
1537 * Unfortunately, it does not ensure the PHY is powered up before
1538 * accessing the PHY ID registers. A chip reset is the
1539 * quickest way to bring the device back to an operational state..
1541 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1544 i = mdiobus_register(tp->mdio_bus);
1546 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1547 mdiobus_free(tp->mdio_bus);
1551 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1553 if (!phydev || !phydev->drv) {
1554 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1555 mdiobus_unregister(tp->mdio_bus);
1556 mdiobus_free(tp->mdio_bus);
1560 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1561 case PHY_ID_BCM57780:
1562 phydev->interface = PHY_INTERFACE_MODE_GMII;
1563 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565 case PHY_ID_BCM50610:
1566 case PHY_ID_BCM50610M:
1567 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1568 PHY_BRCM_RX_REFCLK_UNUSED |
1569 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1570 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1571 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1572 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1573 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1574 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1575 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1576 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1578 case PHY_ID_RTL8211C:
1579 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1581 case PHY_ID_RTL8201E:
1582 case PHY_ID_BCMAC131:
1583 phydev->interface = PHY_INTERFACE_MODE_MII;
1584 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1589 tg3_flag_set(tp, MDIOBUS_INITED);
1591 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1592 tg3_mdio_config_5785(tp);
1597 static void tg3_mdio_fini(struct tg3 *tp)
1599 if (tg3_flag(tp, MDIOBUS_INITED)) {
1600 tg3_flag_clear(tp, MDIOBUS_INITED);
1601 mdiobus_unregister(tp->mdio_bus);
1602 mdiobus_free(tp->mdio_bus);
1606 /* tp->lock is held. */
1607 static inline void tg3_generate_fw_event(struct tg3 *tp)
1611 val = tr32(GRC_RX_CPU_EVENT);
1612 val |= GRC_RX_CPU_DRIVER_EVENT;
1613 tw32_f(GRC_RX_CPU_EVENT, val);
1615 tp->last_event_jiffies = jiffies;
1618 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1620 /* tp->lock is held. */
1621 static void tg3_wait_for_event_ack(struct tg3 *tp)
1624 unsigned int delay_cnt;
1627 /* If enough time has passed, no wait is necessary. */
1628 time_remain = (long)(tp->last_event_jiffies + 1 +
1629 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1631 if (time_remain < 0)
1634 /* Check if we can shorten the wait time. */
1635 delay_cnt = jiffies_to_usecs(time_remain);
1636 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1637 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1638 delay_cnt = (delay_cnt >> 3) + 1;
1640 for (i = 0; i < delay_cnt; i++) {
1641 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1643 if (pci_channel_offline(tp->pdev))
1650 /* tp->lock is held. */
1651 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1656 if (!tg3_readphy(tp, MII_BMCR, ®))
1658 if (!tg3_readphy(tp, MII_BMSR, ®))
1659 val |= (reg & 0xffff);
1663 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1665 if (!tg3_readphy(tp, MII_LPA, ®))
1666 val |= (reg & 0xffff);
1670 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1671 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1673 if (!tg3_readphy(tp, MII_STAT1000, ®))
1674 val |= (reg & 0xffff);
1678 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1685 /* tp->lock is held. */
1686 static void tg3_ump_link_report(struct tg3 *tp)
1690 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1693 tg3_phy_gather_ump_data(tp, data);
1695 tg3_wait_for_event_ack(tp);
1697 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1698 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1699 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1701 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1702 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1704 tg3_generate_fw_event(tp);
1707 /* tp->lock is held. */
1708 static void tg3_stop_fw(struct tg3 *tp)
1710 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1711 /* Wait for RX cpu to ACK the previous event. */
1712 tg3_wait_for_event_ack(tp);
1714 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1716 tg3_generate_fw_event(tp);
1718 /* Wait for RX cpu to ACK this event. */
1719 tg3_wait_for_event_ack(tp);
1723 /* tp->lock is held. */
1724 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1726 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1727 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1729 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1731 case RESET_KIND_INIT:
1732 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736 case RESET_KIND_SHUTDOWN:
1737 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 case RESET_KIND_SUSPEND:
1742 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752 /* tp->lock is held. */
1753 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1755 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1757 case RESET_KIND_INIT:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 DRV_STATE_START_DONE);
1762 case RESET_KIND_SHUTDOWN:
1763 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1764 DRV_STATE_UNLOAD_DONE);
1773 /* tp->lock is held. */
1774 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1776 if (tg3_flag(tp, ENABLE_ASF)) {
1778 case RESET_KIND_INIT:
1779 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 case RESET_KIND_SHUTDOWN:
1784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 case RESET_KIND_SUSPEND:
1789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799 static int tg3_poll_fw(struct tg3 *tp)
1804 if (tg3_flag(tp, NO_FWARE_REPORTED))
1807 if (tg3_flag(tp, IS_SSB_CORE)) {
1808 /* We don't use firmware. */
1812 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1813 /* Wait up to 20ms for init done. */
1814 for (i = 0; i < 200; i++) {
1815 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1817 if (pci_channel_offline(tp->pdev))
1825 /* Wait for firmware initialization to complete. */
1826 for (i = 0; i < 100000; i++) {
1827 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1828 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1830 if (pci_channel_offline(tp->pdev)) {
1831 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1833 netdev_info(tp->dev, "No firmware running\n");
1842 /* Chip might not be fitted with firmware. Some Sun onboard
1843 * parts are configured like that. So don't signal the timeout
1844 * of the above loop as an error, but do report the lack of
1845 * running firmware once.
1847 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1848 tg3_flag_set(tp, NO_FWARE_REPORTED);
1850 netdev_info(tp->dev, "No firmware running\n");
1853 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1854 /* The 57765 A0 needs a little more
1855 * time to do some important work.
1863 static void tg3_link_report(struct tg3 *tp)
1865 if (!netif_carrier_ok(tp->dev)) {
1866 netif_info(tp, link, tp->dev, "Link is down\n");
1867 tg3_ump_link_report(tp);
1868 } else if (netif_msg_link(tp)) {
1869 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1870 (tp->link_config.active_speed == SPEED_1000 ?
1872 (tp->link_config.active_speed == SPEED_100 ?
1874 (tp->link_config.active_duplex == DUPLEX_FULL ?
1877 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1878 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1880 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1883 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1884 netdev_info(tp->dev, "EEE is %s\n",
1885 tp->setlpicnt ? "enabled" : "disabled");
1887 tg3_ump_link_report(tp);
1890 tp->link_up = netif_carrier_ok(tp->dev);
1893 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1897 if (adv & ADVERTISE_PAUSE_CAP) {
1898 flowctrl |= FLOW_CTRL_RX;
1899 if (!(adv & ADVERTISE_PAUSE_ASYM))
1900 flowctrl |= FLOW_CTRL_TX;
1901 } else if (adv & ADVERTISE_PAUSE_ASYM)
1902 flowctrl |= FLOW_CTRL_TX;
1907 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1911 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1912 miireg = ADVERTISE_1000XPAUSE;
1913 else if (flow_ctrl & FLOW_CTRL_TX)
1914 miireg = ADVERTISE_1000XPSE_ASYM;
1915 else if (flow_ctrl & FLOW_CTRL_RX)
1916 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1927 if (adv & ADVERTISE_1000XPAUSE) {
1928 flowctrl |= FLOW_CTRL_RX;
1929 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1930 flowctrl |= FLOW_CTRL_TX;
1931 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1932 flowctrl |= FLOW_CTRL_TX;
1937 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1941 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1942 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1943 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1944 if (lcladv & ADVERTISE_1000XPAUSE)
1946 if (rmtadv & ADVERTISE_1000XPAUSE)
1953 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1957 u32 old_rx_mode = tp->rx_mode;
1958 u32 old_tx_mode = tp->tx_mode;
1960 if (tg3_flag(tp, USE_PHYLIB))
1961 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1963 autoneg = tp->link_config.autoneg;
1965 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1966 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1967 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1969 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1971 flowctrl = tp->link_config.flowctrl;
1973 tp->link_config.active_flowctrl = flowctrl;
1975 if (flowctrl & FLOW_CTRL_RX)
1976 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1978 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1980 if (old_rx_mode != tp->rx_mode)
1981 tw32_f(MAC_RX_MODE, tp->rx_mode);
1983 if (flowctrl & FLOW_CTRL_TX)
1984 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1986 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1988 if (old_tx_mode != tp->tx_mode)
1989 tw32_f(MAC_TX_MODE, tp->tx_mode);
1992 static void tg3_adjust_link(struct net_device *dev)
1994 u8 oldflowctrl, linkmesg = 0;
1995 u32 mac_mode, lcl_adv, rmt_adv;
1996 struct tg3 *tp = netdev_priv(dev);
1997 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1999 spin_lock_bh(&tp->lock);
2001 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2002 MAC_MODE_HALF_DUPLEX);
2004 oldflowctrl = tp->link_config.active_flowctrl;
2010 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2011 mac_mode |= MAC_MODE_PORT_MODE_MII;
2012 else if (phydev->speed == SPEED_1000 ||
2013 tg3_asic_rev(tp) != ASIC_REV_5785)
2014 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2016 mac_mode |= MAC_MODE_PORT_MODE_MII;
2018 if (phydev->duplex == DUPLEX_HALF)
2019 mac_mode |= MAC_MODE_HALF_DUPLEX;
2021 lcl_adv = mii_advertise_flowctrl(
2022 tp->link_config.flowctrl);
2025 rmt_adv = LPA_PAUSE_CAP;
2026 if (phydev->asym_pause)
2027 rmt_adv |= LPA_PAUSE_ASYM;
2030 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2032 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2034 if (mac_mode != tp->mac_mode) {
2035 tp->mac_mode = mac_mode;
2036 tw32_f(MAC_MODE, tp->mac_mode);
2040 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2041 if (phydev->speed == SPEED_10)
2043 MAC_MI_STAT_10MBPS_MODE |
2044 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2046 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2049 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2050 tw32(MAC_TX_LENGTHS,
2051 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2052 (6 << TX_LENGTHS_IPG_SHIFT) |
2053 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2055 tw32(MAC_TX_LENGTHS,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057 (6 << TX_LENGTHS_IPG_SHIFT) |
2058 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060 if (phydev->link != tp->old_link ||
2061 phydev->speed != tp->link_config.active_speed ||
2062 phydev->duplex != tp->link_config.active_duplex ||
2063 oldflowctrl != tp->link_config.active_flowctrl)
2066 tp->old_link = phydev->link;
2067 tp->link_config.active_speed = phydev->speed;
2068 tp->link_config.active_duplex = phydev->duplex;
2070 spin_unlock_bh(&tp->lock);
2073 tg3_link_report(tp);
2076 static int tg3_phy_init(struct tg3 *tp)
2078 struct phy_device *phydev;
2080 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2083 /* Bring the PHY back to a known state. */
2086 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2088 /* Attach the MAC to the PHY. */
2089 phydev = phy_connect(tp->dev, phydev_name(phydev),
2090 tg3_adjust_link, phydev->interface);
2091 if (IS_ERR(phydev)) {
2092 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2093 return PTR_ERR(phydev);
2096 /* Mask with MAC supported features. */
2097 switch (phydev->interface) {
2098 case PHY_INTERFACE_MODE_GMII:
2099 case PHY_INTERFACE_MODE_RGMII:
2100 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2101 phydev->supported &= (PHY_GBIT_FEATURES |
2103 SUPPORTED_Asym_Pause);
2107 case PHY_INTERFACE_MODE_MII:
2108 phydev->supported &= (PHY_BASIC_FEATURES |
2110 SUPPORTED_Asym_Pause);
2113 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2117 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2119 phydev->advertising = phydev->supported;
2121 phy_attached_info(phydev);
2126 static void tg3_phy_start(struct tg3 *tp)
2128 struct phy_device *phydev;
2130 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2135 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2136 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2137 phydev->speed = tp->link_config.speed;
2138 phydev->duplex = tp->link_config.duplex;
2139 phydev->autoneg = tp->link_config.autoneg;
2140 phydev->advertising = tp->link_config.advertising;
2145 phy_start_aneg(phydev);
2148 static void tg3_phy_stop(struct tg3 *tp)
2150 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2153 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2156 static void tg3_phy_fini(struct tg3 *tp)
2158 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2159 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2160 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2172 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173 /* Cannot do read-modify-write on 5401 */
2174 err = tg3_phy_auxctl_write(tp,
2175 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2176 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 err = tg3_phy_auxctl_read(tp,
2182 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2187 err = tg3_phy_auxctl_write(tp,
2188 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2201 tg3_writephy(tp, MII_TG3_FET_TEST,
2202 phytest | MII_TG3_FET_SHADOW_EN);
2203 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2205 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2207 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2208 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2210 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 if (!tg3_flag(tp, 5705_PLUS) ||
2219 (tg3_flag(tp, 5717_PLUS) &&
2220 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2223 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2224 tg3_phy_fet_toggle_apd(tp, enable);
2228 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2229 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2230 MII_TG3_MISC_SHDW_SCR5_SDTL |
2231 MII_TG3_MISC_SHDW_SCR5_C125OE;
2232 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2233 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2235 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2238 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2240 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2242 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2245 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2249 if (!tg3_flag(tp, 5705_PLUS) ||
2250 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2253 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2256 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2257 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2259 tg3_writephy(tp, MII_TG3_FET_TEST,
2260 ephy | MII_TG3_FET_SHADOW_EN);
2261 if (!tg3_readphy(tp, reg, &phy)) {
2263 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2265 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2266 tg3_writephy(tp, reg, phy);
2268 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 ret = tg3_phy_auxctl_read(tp,
2274 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2277 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2279 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2280 tg3_phy_auxctl_write(tp,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2294 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2296 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2297 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2300 static void tg3_phy_apply_otp(struct tg3 *tp)
2309 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2312 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2313 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2314 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2316 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2317 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2320 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2321 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2324 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2327 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2330 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2331 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2334 tg3_phy_toggle_auxctl_smdsp(tp, false);
2337 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2340 struct ethtool_eee *dest = &tp->eee;
2342 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2351 /* Pull eee_active */
2352 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2353 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2354 dest->eee_active = 1;
2356 dest->eee_active = 0;
2358 /* Pull lp advertised settings */
2359 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2361 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2363 /* Pull advertised and eee_enabled settings */
2364 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2366 dest->eee_enabled = !!val;
2367 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2369 /* Pull tx_lpi_enabled */
2370 val = tr32(TG3_CPMU_EEE_MODE);
2371 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2373 /* Pull lpi timer value */
2374 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2377 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2381 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2388 tp->link_config.active_duplex == DUPLEX_FULL &&
2389 (tp->link_config.active_speed == SPEED_100 ||
2390 tp->link_config.active_speed == SPEED_1000)) {
2393 if (tp->link_config.active_speed == SPEED_1000)
2394 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2396 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2398 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2400 tg3_eee_pull_config(tp, NULL);
2401 if (tp->eee.eee_active)
2405 if (!tp->setlpicnt) {
2406 if (current_link_up &&
2407 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2408 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2409 tg3_phy_toggle_auxctl_smdsp(tp, false);
2412 val = tr32(TG3_CPMU_EEE_MODE);
2413 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2417 static void tg3_phy_eee_enable(struct tg3 *tp)
2421 if (tp->link_config.active_speed == SPEED_1000 &&
2422 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2423 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2424 tg3_flag(tp, 57765_CLASS)) &&
2425 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2426 val = MII_TG3_DSP_TAP26_ALNOKO |
2427 MII_TG3_DSP_TAP26_RMRXSTO;
2428 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2429 tg3_phy_toggle_auxctl_smdsp(tp, false);
2432 val = tr32(TG3_CPMU_EEE_MODE);
2433 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2436 static int tg3_wait_macro_done(struct tg3 *tp)
2443 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2444 if ((tmp32 & 0x1000) == 0)
2454 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2456 static const u32 test_pat[4][6] = {
2457 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2458 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2459 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2460 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2464 for (chan = 0; chan < 4; chan++) {
2467 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2468 (chan * 0x2000) | 0x0200);
2469 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2471 for (i = 0; i < 6; i++)
2472 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2475 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2476 if (tg3_wait_macro_done(tp)) {
2481 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2482 (chan * 0x2000) | 0x0200);
2483 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2484 if (tg3_wait_macro_done(tp)) {
2489 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2490 if (tg3_wait_macro_done(tp)) {
2495 for (i = 0; i < 6; i += 2) {
2498 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2499 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2500 tg3_wait_macro_done(tp)) {
2506 if (low != test_pat[chan][i] ||
2507 high != test_pat[chan][i+1]) {
2508 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2509 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2510 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2524 for (chan = 0; chan < 4; chan++) {
2527 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2528 (chan * 0x2000) | 0x0200);
2529 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2530 for (i = 0; i < 6; i++)
2531 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2533 if (tg3_wait_macro_done(tp))
2540 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2542 u32 reg32, phy9_orig;
2543 int retries, do_phy_reset, err;
2549 err = tg3_bmcr_reset(tp);
2555 /* Disable transmitter and interrupt. */
2556 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2560 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2562 /* Set full-duplex, 1000 mbps. */
2563 tg3_writephy(tp, MII_BMCR,
2564 BMCR_FULLDPLX | BMCR_SPEED1000);
2566 /* Set to master mode. */
2567 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2570 tg3_writephy(tp, MII_CTRL1000,
2571 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2573 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2577 /* Block the PHY control access. */
2578 tg3_phydsp_write(tp, 0x8005, 0x0800);
2580 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2583 } while (--retries);
2585 err = tg3_phy_reset_chanpat(tp);
2589 tg3_phydsp_write(tp, 0x8005, 0x0000);
2591 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2592 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2594 tg3_phy_toggle_auxctl_smdsp(tp, false);
2596 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2598 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32);
2603 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608 static void tg3_carrier_off(struct tg3 *tp)
2610 netif_carrier_off(tp->dev);
2611 tp->link_up = false;
2614 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2616 if (tg3_flag(tp, ENABLE_ASF))
2617 netdev_warn(tp->dev,
2618 "Management side-band traffic will be interrupted during phy settings change\n");
2621 /* This will reset the tigon3 PHY if there is no valid
2622 * link unless the FORCE argument is non-zero.
2624 static int tg3_phy_reset(struct tg3 *tp)
2629 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2630 val = tr32(GRC_MISC_CFG);
2631 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2634 err = tg3_readphy(tp, MII_BMSR, &val);
2635 err |= tg3_readphy(tp, MII_BMSR, &val);
2639 if (netif_running(tp->dev) && tp->link_up) {
2640 netif_carrier_off(tp->dev);
2641 tg3_link_report(tp);
2644 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2645 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2646 tg3_asic_rev(tp) == ASIC_REV_5705) {
2647 err = tg3_phy_reset_5703_4_5(tp);
2654 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2655 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2656 cpmuctrl = tr32(TG3_CPMU_CTRL);
2657 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2659 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2662 err = tg3_bmcr_reset(tp);
2666 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2667 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2668 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2670 tw32(TG3_CPMU_CTRL, cpmuctrl);
2673 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2674 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2675 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2676 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2677 CPMU_LSPD_1000MB_MACCLK_12_5) {
2678 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2680 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2684 if (tg3_flag(tp, 5717_PLUS) &&
2685 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2688 tg3_phy_apply_otp(tp);
2690 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2691 tg3_phy_toggle_apd(tp, true);
2693 tg3_phy_toggle_apd(tp, false);
2696 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2697 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2698 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2699 tg3_phydsp_write(tp, 0x000a, 0x0323);
2700 tg3_phy_toggle_auxctl_smdsp(tp, false);
2703 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2704 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2705 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2709 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2710 tg3_phydsp_write(tp, 0x000a, 0x310b);
2711 tg3_phydsp_write(tp, 0x201f, 0x9506);
2712 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2713 tg3_phy_toggle_auxctl_smdsp(tp, false);
2715 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2716 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2717 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2718 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2720 tg3_writephy(tp, MII_TG3_TEST1,
2721 MII_TG3_TEST1_TRIM_EN | 0x4);
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2725 tg3_phy_toggle_auxctl_smdsp(tp, false);
2729 /* Set Extended packet length bit (bit 14) on all chips that */
2730 /* support jumbo frames */
2731 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2732 /* Cannot do read-modify-write on 5401 */
2733 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2734 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2735 /* Set bit 14 with read-modify-write to preserve other bits */
2736 err = tg3_phy_auxctl_read(tp,
2737 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2739 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2740 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2743 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2744 * jumbo frames transmission.
2746 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2747 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2748 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2749 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2752 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2753 /* adjust output voltage */
2754 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2757 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2758 tg3_phydsp_write(tp, 0xffb, 0x4000);
2760 tg3_phy_toggle_automdix(tp, true);
2761 tg3_phy_set_wirespeed(tp);
2765 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2766 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2767 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2768 TG3_GPIO_MSG_NEED_VAUX)
2769 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2770 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2771 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2772 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2773 (TG3_GPIO_MSG_DRVR_PRES << 12))
2775 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2776 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2777 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2778 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2779 (TG3_GPIO_MSG_NEED_VAUX << 12))
2781 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2785 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2786 tg3_asic_rev(tp) == ASIC_REV_5719)
2787 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2789 status = tr32(TG3_CPMU_DRV_STATUS);
2791 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2792 status &= ~(TG3_GPIO_MSG_MASK << shift);
2793 status |= (newstat << shift);
2795 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 tg3_asic_rev(tp) == ASIC_REV_5719)
2797 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2799 tw32(TG3_CPMU_DRV_STATUS, status);
2801 return status >> TG3_APE_GPIO_MSG_SHIFT;
2804 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2806 if (!tg3_flag(tp, IS_NIC))
2809 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2810 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2811 tg3_asic_rev(tp) == ASIC_REV_5720) {
2812 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2815 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2817 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2818 TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2822 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2833 if (!tg3_flag(tp, IS_NIC) ||
2834 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2835 tg3_asic_rev(tp) == ASIC_REV_5701)
2838 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2840 tw32_wait_f(GRC_LOCAL_CTRL,
2841 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2855 if (!tg3_flag(tp, IS_NIC))
2858 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2859 tg3_asic_rev(tp) == ASIC_REV_5701) {
2860 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2861 (GRC_LCLCTRL_GPIO_OE0 |
2862 GRC_LCLCTRL_GPIO_OE1 |
2863 GRC_LCLCTRL_GPIO_OE2 |
2864 GRC_LCLCTRL_GPIO_OUTPUT0 |
2865 GRC_LCLCTRL_GPIO_OUTPUT1),
2866 TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2868 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2869 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2870 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2871 GRC_LCLCTRL_GPIO_OE1 |
2872 GRC_LCLCTRL_GPIO_OE2 |
2873 GRC_LCLCTRL_GPIO_OUTPUT0 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1 |
2876 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 TG3_GRC_LCLCTL_PWRSW_DELAY);
2879 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 u32 grc_local_ctrl = 0;
2890 /* Workaround to prevent overdrawing Amps. */
2891 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2892 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2893 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2895 TG3_GRC_LCLCTL_PWRSW_DELAY);
2898 /* On 5753 and variants, GPIO2 cannot be used. */
2899 no_gpio2 = tp->nic_sram_data_cfg &
2900 NIC_SRAM_DATA_CFG_NO_GPIO2;
2902 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2903 GRC_LCLCTRL_GPIO_OE1 |
2904 GRC_LCLCTRL_GPIO_OE2 |
2905 GRC_LCLCTRL_GPIO_OUTPUT1 |
2906 GRC_LCLCTRL_GPIO_OUTPUT2;
2908 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT2);
2911 tw32_wait_f(GRC_LOCAL_CTRL,
2912 tp->grc_local_ctrl | grc_local_ctrl,
2913 TG3_GRC_LCLCTL_PWRSW_DELAY);
2915 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2917 tw32_wait_f(GRC_LOCAL_CTRL,
2918 tp->grc_local_ctrl | grc_local_ctrl,
2919 TG3_GRC_LCLCTL_PWRSW_DELAY);
2922 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2923 tw32_wait_f(GRC_LOCAL_CTRL,
2924 tp->grc_local_ctrl | grc_local_ctrl,
2925 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2934 /* Serialize power state transitions */
2935 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2938 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2939 msg = TG3_GPIO_MSG_NEED_VAUX;
2941 msg = tg3_set_function_status(tp, msg);
2943 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2946 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2947 tg3_pwrsrc_switch_to_vaux(tp);
2949 tg3_pwrsrc_die_with_vmain(tp);
2952 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2955 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2957 bool need_vaux = false;
2959 /* The GPIOs do something completely different on 57765. */
2960 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2963 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2964 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2965 tg3_asic_rev(tp) == ASIC_REV_5720) {
2966 tg3_frob_aux_power_5717(tp, include_wol ?
2967 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2971 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2972 struct net_device *dev_peer;
2974 dev_peer = pci_get_drvdata(tp->pdev_peer);
2976 /* remove_one() may have been run on the peer. */
2978 struct tg3 *tp_peer = netdev_priv(dev_peer);
2980 if (tg3_flag(tp_peer, INIT_COMPLETE))
2983 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2984 tg3_flag(tp_peer, ENABLE_ASF))
2989 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2990 tg3_flag(tp, ENABLE_ASF))
2994 tg3_pwrsrc_switch_to_vaux(tp);
2996 tg3_pwrsrc_die_with_vmain(tp);
2999 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3001 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3003 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3004 if (speed != SPEED_10)
3006 } else if (speed == SPEED_10)
3012 static bool tg3_phy_power_bug(struct tg3 *tp)
3014 switch (tg3_asic_rev(tp)) {
3019 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3028 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3037 static bool tg3_phy_led_bug(struct tg3 *tp)
3039 switch (tg3_asic_rev(tp)) {
3042 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3051 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3055 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3058 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3059 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3060 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3061 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3064 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3065 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3066 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3073 val = tr32(GRC_MISC_CFG);
3074 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3077 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3082 tg3_writephy(tp, MII_ADVERTISE, 0);
3083 tg3_writephy(tp, MII_BMCR,
3084 BMCR_ANENABLE | BMCR_ANRESTART);
3086 tg3_writephy(tp, MII_TG3_FET_TEST,
3087 phytest | MII_TG3_FET_SHADOW_EN);
3088 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3089 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3091 MII_TG3_FET_SHDW_AUXMODE4,
3094 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3097 } else if (do_low_power) {
3098 if (!tg3_phy_led_bug(tp))
3099 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3100 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3102 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3103 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3104 MII_TG3_AUXCTL_PCTL_VREG_11V;
3105 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3108 /* The PHY should not be powered down on some chips because
3111 if (tg3_phy_power_bug(tp))
3114 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3115 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3116 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3117 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3118 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3119 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3122 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3125 /* tp->lock is held. */
3126 static int tg3_nvram_lock(struct tg3 *tp)
3128 if (tg3_flag(tp, NVRAM)) {
3131 if (tp->nvram_lock_cnt == 0) {
3132 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3133 for (i = 0; i < 8000; i++) {
3134 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3143 tp->nvram_lock_cnt++;
3148 /* tp->lock is held. */
3149 static void tg3_nvram_unlock(struct tg3 *tp)
3151 if (tg3_flag(tp, NVRAM)) {
3152 if (tp->nvram_lock_cnt > 0)
3153 tp->nvram_lock_cnt--;
3154 if (tp->nvram_lock_cnt == 0)
3155 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3159 /* tp->lock is held. */
3160 static void tg3_enable_nvram_access(struct tg3 *tp)
3162 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3163 u32 nvaccess = tr32(NVRAM_ACCESS);
3165 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3169 /* tp->lock is held. */
3170 static void tg3_disable_nvram_access(struct tg3 *tp)
3172 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 u32 nvaccess = tr32(NVRAM_ACCESS);
3175 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3179 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3180 u32 offset, u32 *val)
3185 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3188 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3189 EEPROM_ADDR_DEVID_MASK |
3191 tw32(GRC_EEPROM_ADDR,
3193 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3194 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3195 EEPROM_ADDR_ADDR_MASK) |
3196 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3198 for (i = 0; i < 1000; i++) {
3199 tmp = tr32(GRC_EEPROM_ADDR);
3201 if (tmp & EEPROM_ADDR_COMPLETE)
3205 if (!(tmp & EEPROM_ADDR_COMPLETE))
3208 tmp = tr32(GRC_EEPROM_DATA);
3211 * The data will always be opposite the native endian
3212 * format. Perform a blind byteswap to compensate.
3219 #define NVRAM_CMD_TIMEOUT 5000
3221 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3225 tw32(NVRAM_CMD, nvram_cmd);
3226 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3227 usleep_range(10, 40);
3228 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 if (i == NVRAM_CMD_TIMEOUT)
3240 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3242 if (tg3_flag(tp, NVRAM) &&
3243 tg3_flag(tp, NVRAM_BUFFERED) &&
3244 tg3_flag(tp, FLASH) &&
3245 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3246 (tp->nvram_jedecnum == JEDEC_ATMEL))
3248 addr = ((addr / tp->nvram_pagesize) <<
3249 ATMEL_AT45DB0X1B_PAGE_POS) +
3250 (addr % tp->nvram_pagesize);
3255 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3257 if (tg3_flag(tp, NVRAM) &&
3258 tg3_flag(tp, NVRAM_BUFFERED) &&
3259 tg3_flag(tp, FLASH) &&
3260 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3261 (tp->nvram_jedecnum == JEDEC_ATMEL))
3263 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3264 tp->nvram_pagesize) +
3265 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270 /* NOTE: Data read in from NVRAM is byteswapped according to
3271 * the byteswapping settings for all other register accesses.
3272 * tg3 devices are BE devices, so on a BE machine, the data
3273 * returned will be exactly as it is seen in NVRAM. On a LE
3274 * machine, the 32-bit value will be byteswapped.
3276 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3280 if (!tg3_flag(tp, NVRAM))
3281 return tg3_nvram_read_using_eeprom(tp, offset, val);
3283 offset = tg3_nvram_phys_addr(tp, offset);
3285 if (offset > NVRAM_ADDR_MSK)
3288 ret = tg3_nvram_lock(tp);
3292 tg3_enable_nvram_access(tp);
3294 tw32(NVRAM_ADDR, offset);
3295 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3296 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3299 *val = tr32(NVRAM_RDDATA);
3301 tg3_disable_nvram_access(tp);
3303 tg3_nvram_unlock(tp);
3308 /* Ensures NVRAM data is in bytestream format. */
3309 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3312 int res = tg3_nvram_read(tp, offset, &v);
3314 *val = cpu_to_be32(v);
3318 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3319 u32 offset, u32 len, u8 *buf)
3324 for (i = 0; i < len; i += 4) {
3330 memcpy(&data, buf + i, 4);
3333 * The SEEPROM interface expects the data to always be opposite
3334 * the native endian format. We accomplish this by reversing
3335 * all the operations that would have been performed on the
3336 * data from a call to tg3_nvram_read_be32().
3338 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3340 val = tr32(GRC_EEPROM_ADDR);
3341 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3343 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3345 tw32(GRC_EEPROM_ADDR, val |
3346 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3347 (addr & EEPROM_ADDR_ADDR_MASK) |
3351 for (j = 0; j < 1000; j++) {
3352 val = tr32(GRC_EEPROM_ADDR);
3354 if (val & EEPROM_ADDR_COMPLETE)
3358 if (!(val & EEPROM_ADDR_COMPLETE)) {
3367 /* offset and length are dword aligned */
3368 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3372 u32 pagesize = tp->nvram_pagesize;
3373 u32 pagemask = pagesize - 1;
3377 tmp = kmalloc(pagesize, GFP_KERNEL);
3383 u32 phy_addr, page_off, size;
3385 phy_addr = offset & ~pagemask;
3387 for (j = 0; j < pagesize; j += 4) {
3388 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3389 (__be32 *) (tmp + j));
3396 page_off = offset & pagemask;
3403 memcpy(tmp + page_off, buf, size);
3405 offset = offset + (pagesize - page_off);
3407 tg3_enable_nvram_access(tp);
3410 * Before we can erase the flash page, we need
3411 * to issue a special "write enable" command.
3413 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3415 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3418 /* Erase the target page */
3419 tw32(NVRAM_ADDR, phy_addr);
3421 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3422 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3424 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3427 /* Issue another write enable to start the write. */
3428 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3430 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433 for (j = 0; j < pagesize; j += 4) {
3436 data = *((__be32 *) (tmp + j));
3438 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3440 tw32(NVRAM_ADDR, phy_addr + j);
3442 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3446 nvram_cmd |= NVRAM_CMD_FIRST;
3447 else if (j == (pagesize - 4))
3448 nvram_cmd |= NVRAM_CMD_LAST;
3450 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3458 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3459 tg3_nvram_exec_cmd(tp, nvram_cmd);
3466 /* offset and length are dword aligned */
3467 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 for (i = 0; i < len; i += 4, offset += 4) {
3473 u32 page_off, phy_addr, nvram_cmd;
3476 memcpy(&data, buf + i, 4);
3477 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3479 page_off = offset % tp->nvram_pagesize;
3481 phy_addr = tg3_nvram_phys_addr(tp, offset);
3483 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3485 if (page_off == 0 || i == 0)
3486 nvram_cmd |= NVRAM_CMD_FIRST;
3487 if (page_off == (tp->nvram_pagesize - 4))
3488 nvram_cmd |= NVRAM_CMD_LAST;
3491 nvram_cmd |= NVRAM_CMD_LAST;
3493 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3494 !tg3_flag(tp, FLASH) ||
3495 !tg3_flag(tp, 57765_PLUS))
3496 tw32(NVRAM_ADDR, phy_addr);
3498 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3499 !tg3_flag(tp, 5755_PLUS) &&
3500 (tp->nvram_jedecnum == JEDEC_ST) &&
3501 (nvram_cmd & NVRAM_CMD_FIRST)) {
3504 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3505 ret = tg3_nvram_exec_cmd(tp, cmd);
3509 if (!tg3_flag(tp, FLASH)) {
3510 /* We always do complete word writes to eeprom. */
3511 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3514 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3521 /* offset and length are dword aligned */
3522 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3526 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3527 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3528 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3532 if (!tg3_flag(tp, NVRAM)) {
3533 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3537 ret = tg3_nvram_lock(tp);
3541 tg3_enable_nvram_access(tp);
3542 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3543 tw32(NVRAM_WRITE1, 0x406);
3545 grc_mode = tr32(GRC_MODE);
3546 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3548 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3549 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3552 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3556 grc_mode = tr32(GRC_MODE);
3557 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3559 tg3_disable_nvram_access(tp);
3560 tg3_nvram_unlock(tp);
3563 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3564 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3571 #define RX_CPU_SCRATCH_BASE 0x30000
3572 #define RX_CPU_SCRATCH_SIZE 0x04000
3573 #define TX_CPU_SCRATCH_BASE 0x34000
3574 #define TX_CPU_SCRATCH_SIZE 0x04000
3576 /* tp->lock is held. */
3577 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3580 const int iters = 10000;
3582 for (i = 0; i < iters; i++) {
3583 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3585 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3587 if (pci_channel_offline(tp->pdev))
3591 return (i == iters) ? -EBUSY : 0;
3594 /* tp->lock is held. */
3595 static int tg3_rxcpu_pause(struct tg3 *tp)
3597 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3599 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3600 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3606 /* tp->lock is held. */
3607 static int tg3_txcpu_pause(struct tg3 *tp)
3609 return tg3_pause_cpu(tp, TX_CPU_BASE);
3612 /* tp->lock is held. */
3613 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3615 tw32(cpu_base + CPU_STATE, 0xffffffff);
3616 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3619 /* tp->lock is held. */
3620 static void tg3_rxcpu_resume(struct tg3 *tp)
3622 tg3_resume_cpu(tp, RX_CPU_BASE);
3625 /* tp->lock is held. */
3626 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3630 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3632 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3633 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3635 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3638 if (cpu_base == RX_CPU_BASE) {
3639 rc = tg3_rxcpu_pause(tp);
3642 * There is only an Rx CPU for the 5750 derivative in the
3645 if (tg3_flag(tp, IS_SSB_CORE))
3648 rc = tg3_txcpu_pause(tp);
3652 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3653 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3657 /* Clear firmware's nvram arbitration. */
3658 if (tg3_flag(tp, NVRAM))
3659 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3663 static int tg3_fw_data_len(struct tg3 *tp,
3664 const struct tg3_firmware_hdr *fw_hdr)
3668 /* Non fragmented firmware have one firmware header followed by a
3669 * contiguous chunk of data to be written. The length field in that
3670 * header is not the length of data to be written but the complete
3671 * length of the bss. The data length is determined based on
3672 * tp->fw->size minus headers.
3674 * Fragmented firmware have a main header followed by multiple
3675 * fragments. Each fragment is identical to non fragmented firmware
3676 * with a firmware header followed by a contiguous chunk of data. In
3677 * the main header, the length field is unused and set to 0xffffffff.
3678 * In each fragment header the length is the entire size of that
3679 * fragment i.e. fragment data + header length. Data length is
3680 * therefore length field in the header minus TG3_FW_HDR_LEN.
3682 if (tp->fw_len == 0xffffffff)
3683 fw_len = be32_to_cpu(fw_hdr->len);
3685 fw_len = tp->fw->size;
3687 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3690 /* tp->lock is held. */
3691 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3692 u32 cpu_scratch_base, int cpu_scratch_size,
3693 const struct tg3_firmware_hdr *fw_hdr)
3696 void (*write_op)(struct tg3 *, u32, u32);
3697 int total_len = tp->fw->size;
3699 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3701 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3707 write_op = tg3_write_mem;
3709 write_op = tg3_write_indirect_reg32;
3711 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3712 /* It is possible that bootcode is still loading at this point.
3713 * Get the nvram lock first before halting the cpu.
3715 int lock_err = tg3_nvram_lock(tp);
3716 err = tg3_halt_cpu(tp, cpu_base);
3718 tg3_nvram_unlock(tp);
3722 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3723 write_op(tp, cpu_scratch_base + i, 0);
3724 tw32(cpu_base + CPU_STATE, 0xffffffff);
3725 tw32(cpu_base + CPU_MODE,
3726 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3728 /* Subtract additional main header for fragmented firmware and
3729 * advance to the first fragment
3731 total_len -= TG3_FW_HDR_LEN;
3736 u32 *fw_data = (u32 *)(fw_hdr + 1);
3737 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3738 write_op(tp, cpu_scratch_base +
3739 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3741 be32_to_cpu(fw_data[i]));
3743 total_len -= be32_to_cpu(fw_hdr->len);
3745 /* Advance to next fragment */
3746 fw_hdr = (struct tg3_firmware_hdr *)
3747 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3748 } while (total_len > 0);
3756 /* tp->lock is held. */
3757 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3760 const int iters = 5;
3762 tw32(cpu_base + CPU_STATE, 0xffffffff);
3763 tw32_f(cpu_base + CPU_PC, pc);
3765 for (i = 0; i < iters; i++) {
3766 if (tr32(cpu_base + CPU_PC) == pc)
3768 tw32(cpu_base + CPU_STATE, 0xffffffff);
3769 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3770 tw32_f(cpu_base + CPU_PC, pc);
3774 return (i == iters) ? -EBUSY : 0;
3777 /* tp->lock is held. */
3778 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3780 const struct tg3_firmware_hdr *fw_hdr;
3783 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3785 /* Firmware blob starts with version numbers, followed by
3786 start address and length. We are setting complete length.
3787 length = end_address_of_bss - start_address_of_text.
3788 Remainder is the blob to be loaded contiguously
3789 from start address. */
3791 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3792 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3798 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 /* Now startup only the RX cpu. */
3804 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3805 be32_to_cpu(fw_hdr->base_addr));
3807 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3808 "should be %08x\n", __func__,
3809 tr32(RX_CPU_BASE + CPU_PC),
3810 be32_to_cpu(fw_hdr->base_addr));
3814 tg3_rxcpu_resume(tp);
3819 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3821 const int iters = 1000;
3825 /* Wait for boot code to complete initialization and enter service
3826 * loop. It is then safe to download service patches
3828 for (i = 0; i < iters; i++) {
3829 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3836 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3840 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3842 netdev_warn(tp->dev,
3843 "Other patches exist. Not downloading EEE patch\n");
3850 /* tp->lock is held. */
3851 static void tg3_load_57766_firmware(struct tg3 *tp)
3853 struct tg3_firmware_hdr *fw_hdr;
3855 if (!tg3_flag(tp, NO_NVRAM))
3858 if (tg3_validate_rxcpu_state(tp))
3864 /* This firmware blob has a different format than older firmware
3865 * releases as given below. The main difference is we have fragmented
3866 * data to be written to non-contiguous locations.
3868 * In the beginning we have a firmware header identical to other
3869 * firmware which consists of version, base addr and length. The length
3870 * here is unused and set to 0xffffffff.
3872 * This is followed by a series of firmware fragments which are
3873 * individually identical to previous firmware. i.e. they have the
3874 * firmware header and followed by data for that fragment. The version
3875 * field of the individual fragment header is unused.
3878 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3879 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3882 if (tg3_rxcpu_pause(tp))
3885 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3886 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3888 tg3_rxcpu_resume(tp);
3891 /* tp->lock is held. */
3892 static int tg3_load_tso_firmware(struct tg3 *tp)
3894 const struct tg3_firmware_hdr *fw_hdr;
3895 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3898 if (!tg3_flag(tp, FW_TSO))
3901 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3903 /* Firmware blob starts with version numbers, followed by
3904 start address and length. We are setting complete length.
3905 length = end_address_of_bss - start_address_of_text.
3906 Remainder is the blob to be loaded contiguously
3907 from start address. */
3909 cpu_scratch_size = tp->fw_len;
3911 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3912 cpu_base = RX_CPU_BASE;
3913 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3915 cpu_base = TX_CPU_BASE;
3916 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3917 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3920 err = tg3_load_firmware_cpu(tp, cpu_base,
3921 cpu_scratch_base, cpu_scratch_size,
3926 /* Now startup the cpu. */
3927 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3928 be32_to_cpu(fw_hdr->base_addr));
3931 "%s fails to set CPU PC, is %08x should be %08x\n",
3932 __func__, tr32(cpu_base + CPU_PC),
3933 be32_to_cpu(fw_hdr->base_addr));
3937 tg3_resume_cpu(tp, cpu_base);
3941 /* tp->lock is held. */
3942 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3944 u32 addr_high, addr_low;
3946 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3947 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3948 (mac_addr[4] << 8) | mac_addr[5]);
3951 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3952 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3955 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3956 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3960 /* tp->lock is held. */
3961 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3966 for (i = 0; i < 4; i++) {
3967 if (i == 1 && skip_mac_1)
3969 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3972 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3973 tg3_asic_rev(tp) == ASIC_REV_5704) {
3974 for (i = 4; i < 16; i++)
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978 addr_high = (tp->dev->dev_addr[0] +
3979 tp->dev->dev_addr[1] +
3980 tp->dev->dev_addr[2] +
3981 tp->dev->dev_addr[3] +
3982 tp->dev->dev_addr[4] +
3983 tp->dev->dev_addr[5]) &
3984 TX_BACKOFF_SEED_MASK;
3985 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3988 static void tg3_enable_register_access(struct tg3 *tp)
3991 * Make sure register accesses (indirect or otherwise) will function
3994 pci_write_config_dword(tp->pdev,
3995 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3998 static int tg3_power_up(struct tg3 *tp)
4002 tg3_enable_register_access(tp);
4004 err = pci_set_power_state(tp->pdev, PCI_D0);
4006 /* Switch out of Vaux if it is a NIC */
4007 tg3_pwrsrc_switch_to_vmain(tp);
4009 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 static int tg3_setup_phy(struct tg3 *, bool);
4017 static int tg3_power_down_prepare(struct tg3 *tp)
4020 bool device_should_wake, do_low_power;
4022 tg3_enable_register_access(tp);
4024 /* Restore the CLKREQ setting. */
4025 if (tg3_flag(tp, CLKREQ_BUG))
4026 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4027 PCI_EXP_LNKCTL_CLKREQ_EN);
4029 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4030 tw32(TG3PCI_MISC_HOST_CTRL,
4031 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4033 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4034 tg3_flag(tp, WOL_ENABLE);
4036 if (tg3_flag(tp, USE_PHYLIB)) {
4037 do_low_power = false;
4038 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4039 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040 struct phy_device *phydev;
4041 u32 phyid, advertising;
4043 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4045 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4047 tp->link_config.speed = phydev->speed;
4048 tp->link_config.duplex = phydev->duplex;
4049 tp->link_config.autoneg = phydev->autoneg;
4050 tp->link_config.advertising = phydev->advertising;
4052 advertising = ADVERTISED_TP |
4054 ADVERTISED_Autoneg |
4055 ADVERTISED_10baseT_Half;
4057 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4058 if (tg3_flag(tp, WOL_SPEED_100MB))
4060 ADVERTISED_100baseT_Half |
4061 ADVERTISED_100baseT_Full |
4062 ADVERTISED_10baseT_Full;
4064 advertising |= ADVERTISED_10baseT_Full;
4067 phydev->advertising = advertising;
4069 phy_start_aneg(phydev);
4071 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4072 if (phyid != PHY_ID_BCMAC131) {
4073 phyid &= PHY_BCM_OUI_MASK;
4074 if (phyid == PHY_BCM_OUI_1 ||
4075 phyid == PHY_BCM_OUI_2 ||
4076 phyid == PHY_BCM_OUI_3)
4077 do_low_power = true;
4081 do_low_power = true;
4083 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4084 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4087 tg3_setup_phy(tp, false);
4090 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4093 val = tr32(GRC_VCPU_EXT_CTRL);
4094 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4095 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4099 for (i = 0; i < 200; i++) {
4100 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4101 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4106 if (tg3_flag(tp, WOL_CAP))
4107 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4108 WOL_DRV_STATE_SHUTDOWN |
4112 if (device_should_wake) {
4115 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4118 tg3_phy_auxctl_write(tp,
4119 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4120 MII_TG3_AUXCTL_PCTL_WOL_EN |
4121 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4122 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4126 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4127 mac_mode = MAC_MODE_PORT_MODE_GMII;
4128 else if (tp->phy_flags &
4129 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4130 if (tp->link_config.active_speed == SPEED_1000)
4131 mac_mode = MAC_MODE_PORT_MODE_GMII;
4133 mac_mode = MAC_MODE_PORT_MODE_MII;
4135 mac_mode = MAC_MODE_PORT_MODE_MII;
4137 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4138 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4139 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4140 SPEED_100 : SPEED_10;
4141 if (tg3_5700_link_polarity(tp, speed))
4142 mac_mode |= MAC_MODE_LINK_POLARITY;
4144 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4147 mac_mode = MAC_MODE_PORT_MODE_TBI;
4150 if (!tg3_flag(tp, 5750_PLUS))
4151 tw32(MAC_LED_CTRL, tp->led_ctrl);
4153 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4154 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4155 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4156 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158 if (tg3_flag(tp, ENABLE_APE))
4159 mac_mode |= MAC_MODE_APE_TX_EN |
4160 MAC_MODE_APE_RX_EN |
4161 MAC_MODE_TDE_ENABLE;
4163 tw32_f(MAC_MODE, mac_mode);
4166 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4170 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4171 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4172 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4175 base_val = tp->pci_clock_ctrl;
4176 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4177 CLOCK_CTRL_TXCLK_DISABLE);
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4180 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4181 } else if (tg3_flag(tp, 5780_CLASS) ||
4182 tg3_flag(tp, CPMU_PRESENT) ||
4183 tg3_asic_rev(tp) == ASIC_REV_5906) {
4185 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4186 u32 newbits1, newbits2;
4188 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4189 tg3_asic_rev(tp) == ASIC_REV_5701) {
4190 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE |
4193 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4194 } else if (tg3_flag(tp, 5705_PLUS)) {
4195 newbits1 = CLOCK_CTRL_625_CORE;
4196 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198 newbits1 = CLOCK_CTRL_ALTCLK;
4199 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4202 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4205 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4208 if (!tg3_flag(tp, 5705_PLUS)) {
4211 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4212 tg3_asic_rev(tp) == ASIC_REV_5701) {
4213 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4214 CLOCK_CTRL_TXCLK_DISABLE |
4215 CLOCK_CTRL_44MHZ_CORE);
4217 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4220 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4221 tp->pci_clock_ctrl | newbits3, 40);
4225 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4226 tg3_power_down_phy(tp, do_low_power);
4228 tg3_frob_aux_power(tp, true);
4230 /* Workaround for unstable PLL clock */
4231 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4232 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4233 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4234 u32 val = tr32(0x7d00);
4236 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238 if (!tg3_flag(tp, ENABLE_ASF)) {
4241 err = tg3_nvram_lock(tp);
4242 tg3_halt_cpu(tp, RX_CPU_BASE);
4244 tg3_nvram_unlock(tp);
4248 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4255 static void tg3_power_down(struct tg3 *tp)
4257 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4258 pci_set_power_state(tp->pdev, PCI_D3hot);
4261 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4263 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4264 case MII_TG3_AUX_STAT_10HALF:
4266 *duplex = DUPLEX_HALF;
4269 case MII_TG3_AUX_STAT_10FULL:
4271 *duplex = DUPLEX_FULL;
4274 case MII_TG3_AUX_STAT_100HALF:
4276 *duplex = DUPLEX_HALF;
4279 case MII_TG3_AUX_STAT_100FULL:
4281 *duplex = DUPLEX_FULL;
4284 case MII_TG3_AUX_STAT_1000HALF:
4285 *speed = SPEED_1000;
4286 *duplex = DUPLEX_HALF;
4289 case MII_TG3_AUX_STAT_1000FULL:
4290 *speed = SPEED_1000;
4291 *duplex = DUPLEX_FULL;
4295 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4296 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4302 *speed = SPEED_UNKNOWN;
4303 *duplex = DUPLEX_UNKNOWN;
4308 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4313 new_adv = ADVERTISE_CSMA;
4314 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4315 new_adv |= mii_advertise_flowctrl(flowctrl);
4317 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4321 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4322 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4324 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4325 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4326 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4328 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4333 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4336 tw32(TG3_CPMU_EEE_MODE,
4337 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4339 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4344 /* Advertise 100-BaseTX EEE ability */
4345 if (advertise & ADVERTISED_100baseT_Full)
4346 val |= MDIO_AN_EEE_ADV_100TX;
4347 /* Advertise 1000-BaseT EEE ability */
4348 if (advertise & ADVERTISED_1000baseT_Full)
4349 val |= MDIO_AN_EEE_ADV_1000T;
4351 if (!tp->eee.eee_enabled) {
4353 tp->eee.advertised = 0;
4355 tp->eee.advertised = advertise &
4356 (ADVERTISED_100baseT_Full |
4357 ADVERTISED_1000baseT_Full);
4360 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4364 switch (tg3_asic_rev(tp)) {
4366 case ASIC_REV_57765:
4367 case ASIC_REV_57766:
4369 /* If we advertised any eee advertisements above... */
4371 val = MII_TG3_DSP_TAP26_ALNOKO |
4372 MII_TG3_DSP_TAP26_RMRXSTO |
4373 MII_TG3_DSP_TAP26_OPCSINPT;
4374 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4378 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4379 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4380 MII_TG3_DSP_CH34TP2_HIBW01);
4383 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4392 static void tg3_phy_copper_begin(struct tg3 *tp)
4394 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4395 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4398 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4399 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4400 adv = ADVERTISED_10baseT_Half |
4401 ADVERTISED_10baseT_Full;
4402 if (tg3_flag(tp, WOL_SPEED_100MB))
4403 adv |= ADVERTISED_100baseT_Half |
4404 ADVERTISED_100baseT_Full;
4405 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4406 if (!(tp->phy_flags &
4407 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4408 adv |= ADVERTISED_1000baseT_Half;
4409 adv |= ADVERTISED_1000baseT_Full;
4412 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4414 adv = tp->link_config.advertising;
4415 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4416 adv &= ~(ADVERTISED_1000baseT_Half |
4417 ADVERTISED_1000baseT_Full);
4419 fc = tp->link_config.flowctrl;
4422 tg3_phy_autoneg_cfg(tp, adv, fc);
4424 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4425 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4426 /* Normally during power down we want to autonegotiate
4427 * the lowest possible speed for WOL. However, to avoid
4428 * link flap, we leave it untouched.
4433 tg3_writephy(tp, MII_BMCR,
4434 BMCR_ANENABLE | BMCR_ANRESTART);
4437 u32 bmcr, orig_bmcr;
4439 tp->link_config.active_speed = tp->link_config.speed;
4440 tp->link_config.active_duplex = tp->link_config.duplex;
4442 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4443 /* With autoneg disabled, 5715 only links up when the
4444 * advertisement register has the configured speed
4447 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4451 switch (tp->link_config.speed) {
4457 bmcr |= BMCR_SPEED100;
4461 bmcr |= BMCR_SPEED1000;
4465 if (tp->link_config.duplex == DUPLEX_FULL)
4466 bmcr |= BMCR_FULLDPLX;
4468 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4469 (bmcr != orig_bmcr)) {
4470 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4471 for (i = 0; i < 1500; i++) {
4475 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4476 tg3_readphy(tp, MII_BMSR, &tmp))
4478 if (!(tmp & BMSR_LSTATUS)) {
4483 tg3_writephy(tp, MII_BMCR, bmcr);
4489 static int tg3_phy_pull_config(struct tg3 *tp)
4494 err = tg3_readphy(tp, MII_BMCR, &val);
4498 if (!(val & BMCR_ANENABLE)) {
4499 tp->link_config.autoneg = AUTONEG_DISABLE;
4500 tp->link_config.advertising = 0;
4501 tg3_flag_clear(tp, PAUSE_AUTONEG);
4505 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4507 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4510 tp->link_config.speed = SPEED_10;
4513 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516 tp->link_config.speed = SPEED_100;
4518 case BMCR_SPEED1000:
4519 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4520 tp->link_config.speed = SPEED_1000;
4528 if (val & BMCR_FULLDPLX)
4529 tp->link_config.duplex = DUPLEX_FULL;
4531 tp->link_config.duplex = DUPLEX_HALF;
4533 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4539 tp->link_config.autoneg = AUTONEG_ENABLE;
4540 tp->link_config.advertising = ADVERTISED_Autoneg;
4541 tg3_flag_set(tp, PAUSE_AUTONEG);
4543 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4546 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4550 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4551 tp->link_config.advertising |= adv | ADVERTISED_TP;
4553 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4555 tp->link_config.advertising |= ADVERTISED_FIBRE;
4558 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4561 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4562 err = tg3_readphy(tp, MII_CTRL1000, &val);
4566 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4568 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572 adv = tg3_decode_flowctrl_1000X(val);
4573 tp->link_config.flowctrl = adv;
4575 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4576 adv = mii_adv_to_ethtool_adv_x(val);
4579 tp->link_config.advertising |= adv;
4586 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4590 /* Turn off tap power management. */
4591 /* Set Extended packet length bit */
4592 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4594 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4595 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4596 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4597 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4598 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4605 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4607 struct ethtool_eee eee;
4609 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4612 tg3_eee_pull_config(tp, &eee);
4614 if (tp->eee.eee_enabled) {
4615 if (tp->eee.advertised != eee.advertised ||
4616 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4617 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4620 /* EEE is disabled but we're advertising */
4628 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4630 u32 advmsk, tgtadv, advertising;
4632 advertising = tp->link_config.advertising;
4633 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4635 advmsk = ADVERTISE_ALL;
4636 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4637 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4638 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4641 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4644 if ((*lcladv & advmsk) != tgtadv)
4647 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4650 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4652 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4656 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4657 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4658 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4659 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4660 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4662 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4665 if (tg3_ctrl != tgtadv)
4672 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4676 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4679 if (tg3_readphy(tp, MII_STAT1000, &val))
4682 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4685 if (tg3_readphy(tp, MII_LPA, rmtadv))
4688 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4689 tp->link_config.rmt_adv = lpeth;
4694 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4696 if (curr_link_up != tp->link_up) {
4698 netif_carrier_on(tp->dev);
4700 netif_carrier_off(tp->dev);
4701 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4702 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4705 tg3_link_report(tp);
4712 static void tg3_clear_mac_status(struct tg3 *tp)
4717 MAC_STATUS_SYNC_CHANGED |
4718 MAC_STATUS_CFG_CHANGED |
4719 MAC_STATUS_MI_COMPLETION |
4720 MAC_STATUS_LNKSTATE_CHANGED);
4724 static void tg3_setup_eee(struct tg3 *tp)
4728 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4729 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4730 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4731 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4733 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4735 tw32_f(TG3_CPMU_EEE_CTRL,
4736 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4738 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4739 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4740 TG3_CPMU_EEEMD_LPI_IN_RX |
4741 TG3_CPMU_EEEMD_EEE_ENABLE;
4743 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4744 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4746 if (tg3_flag(tp, ENABLE_APE))
4747 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4749 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4751 tw32_f(TG3_CPMU_EEE_DBTMR1,
4752 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4753 (tp->eee.tx_lpi_timer & 0xffff));
4755 tw32_f(TG3_CPMU_EEE_DBTMR2,
4756 TG3_CPMU_DBTMR2_APE_TX_2047US |
4757 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4760 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4762 bool current_link_up;
4764 u32 lcl_adv, rmt_adv;
4769 tg3_clear_mac_status(tp);
4771 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4773 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4777 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4779 /* Some third-party PHYs need to be reset on link going
4782 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4783 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4784 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788 !(bmsr & BMSR_LSTATUS))
4794 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4795 tg3_readphy(tp, MII_BMSR, &bmsr);
4796 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4797 !tg3_flag(tp, INIT_COMPLETE))
4800 if (!(bmsr & BMSR_LSTATUS)) {
4801 err = tg3_init_5401phy_dsp(tp);
4805 tg3_readphy(tp, MII_BMSR, &bmsr);
4806 for (i = 0; i < 1000; i++) {
4808 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4809 (bmsr & BMSR_LSTATUS)) {
4815 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4816 TG3_PHY_REV_BCM5401_B0 &&
4817 !(bmsr & BMSR_LSTATUS) &&
4818 tp->link_config.active_speed == SPEED_1000) {
4819 err = tg3_phy_reset(tp);
4821 err = tg3_init_5401phy_dsp(tp);
4826 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4827 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4828 /* 5701 {A0,B0} CRC bug workaround */
4829 tg3_writephy(tp, 0x15, 0x0a75);
4830 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4831 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4832 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4835 /* Clear pending interrupts... */
4836 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4837 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4839 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4840 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4841 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4842 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4844 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4845 tg3_asic_rev(tp) == ASIC_REV_5701) {
4846 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4847 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4848 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4850 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4853 current_link_up = false;
4854 current_speed = SPEED_UNKNOWN;
4855 current_duplex = DUPLEX_UNKNOWN;
4856 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4857 tp->link_config.rmt_adv = 0;
4859 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4860 err = tg3_phy_auxctl_read(tp,
4861 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4863 if (!err && !(val & (1 << 10))) {
4864 tg3_phy_auxctl_write(tp,
4865 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4872 for (i = 0; i < 100; i++) {
4873 tg3_readphy(tp, MII_BMSR, &bmsr);
4874 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4875 (bmsr & BMSR_LSTATUS))
4880 if (bmsr & BMSR_LSTATUS) {
4883 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4884 for (i = 0; i < 2000; i++) {
4886 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4891 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4896 for (i = 0; i < 200; i++) {
4897 tg3_readphy(tp, MII_BMCR, &bmcr);
4898 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4900 if (bmcr && bmcr != 0x7fff)
4908 tp->link_config.active_speed = current_speed;
4909 tp->link_config.active_duplex = current_duplex;
4911 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4912 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4914 if ((bmcr & BMCR_ANENABLE) &&
4916 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4917 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4918 current_link_up = true;
4920 /* EEE settings changes take effect only after a phy
4921 * reset. If we have skipped a reset due to Link Flap
4922 * Avoidance being enabled, do it now.
4924 if (!eee_config_ok &&
4925 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4931 if (!(bmcr & BMCR_ANENABLE) &&
4932 tp->link_config.speed == current_speed &&
4933 tp->link_config.duplex == current_duplex) {
4934 current_link_up = true;
4938 if (current_link_up &&
4939 tp->link_config.active_duplex == DUPLEX_FULL) {
4942 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4943 reg = MII_TG3_FET_GEN_STAT;
4944 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4946 reg = MII_TG3_EXT_STAT;
4947 bit = MII_TG3_EXT_STAT_MDIX;
4950 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4951 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4953 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4958 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4959 tg3_phy_copper_begin(tp);
4961 if (tg3_flag(tp, ROBOSWITCH)) {
4962 current_link_up = true;
4963 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4964 current_speed = SPEED_1000;
4965 current_duplex = DUPLEX_FULL;
4966 tp->link_config.active_speed = current_speed;
4967 tp->link_config.active_duplex = current_duplex;
4970 tg3_readphy(tp, MII_BMSR, &bmsr);
4971 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4972 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4973 current_link_up = true;
4976 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4977 if (current_link_up) {
4978 if (tp->link_config.active_speed == SPEED_100 ||
4979 tp->link_config.active_speed == SPEED_10)
4980 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4982 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4983 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4984 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4986 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4988 /* In order for the 5750 core in BCM4785 chip to work properly
4989 * in RGMII mode, the Led Control Register must be set up.
4991 if (tg3_flag(tp, RGMII_MODE)) {
4992 u32 led_ctrl = tr32(MAC_LED_CTRL);
4993 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4995 if (tp->link_config.active_speed == SPEED_10)
4996 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4997 else if (tp->link_config.active_speed == SPEED_100)
4998 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4999 LED_CTRL_100MBPS_ON);
5000 else if (tp->link_config.active_speed == SPEED_1000)
5001 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002 LED_CTRL_1000MBPS_ON);
5004 tw32(MAC_LED_CTRL, led_ctrl);
5008 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5009 if (tp->link_config.active_duplex == DUPLEX_HALF)
5010 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5012 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5013 if (current_link_up &&
5014 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5015 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5017 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5020 /* ??? Without this setting Netgear GA302T PHY does not
5021 * ??? send/receive packets...
5023 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5024 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5025 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5026 tw32_f(MAC_MI_MODE, tp->mi_mode);
5030 tw32_f(MAC_MODE, tp->mac_mode);
5033 tg3_phy_eee_adjust(tp, current_link_up);
5035 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5036 /* Polled via timer. */
5037 tw32_f(MAC_EVENT, 0);
5039 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5043 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5045 tp->link_config.active_speed == SPEED_1000 &&
5046 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5049 (MAC_STATUS_SYNC_CHANGED |
5050 MAC_STATUS_CFG_CHANGED));
5053 NIC_SRAM_FIRMWARE_MBOX,
5054 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5057 /* Prevent send BD corruption. */
5058 if (tg3_flag(tp, CLKREQ_BUG)) {
5059 if (tp->link_config.active_speed == SPEED_100 ||
5060 tp->link_config.active_speed == SPEED_10)
5061 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5062 PCI_EXP_LNKCTL_CLKREQ_EN);
5064 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5065 PCI_EXP_LNKCTL_CLKREQ_EN);
5068 tg3_test_and_report_link_chg(tp, current_link_up);
5073 struct tg3_fiber_aneginfo {
5075 #define ANEG_STATE_UNKNOWN 0
5076 #define ANEG_STATE_AN_ENABLE 1
5077 #define ANEG_STATE_RESTART_INIT 2
5078 #define ANEG_STATE_RESTART 3
5079 #define ANEG_STATE_DISABLE_LINK_OK 4
5080 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5081 #define ANEG_STATE_ABILITY_DETECT 6
5082 #define ANEG_STATE_ACK_DETECT_INIT 7
5083 #define ANEG_STATE_ACK_DETECT 8
5084 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5085 #define ANEG_STATE_COMPLETE_ACK 10
5086 #define ANEG_STATE_IDLE_DETECT_INIT 11
5087 #define ANEG_STATE_IDLE_DETECT 12
5088 #define ANEG_STATE_LINK_OK 13
5089 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5090 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5093 #define MR_AN_ENABLE 0x00000001
5094 #define MR_RESTART_AN 0x00000002
5095 #define MR_AN_COMPLETE 0x00000004
5096 #define MR_PAGE_RX 0x00000008
5097 #define MR_NP_LOADED 0x00000010
5098 #define MR_TOGGLE_TX 0x00000020
5099 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5100 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5101 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5102 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5103 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5104 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5105 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5106 #define MR_TOGGLE_RX 0x00002000
5107 #define MR_NP_RX 0x00004000
5109 #define MR_LINK_OK 0x80000000
5111 unsigned long link_time, cur_time;
5113 u32 ability_match_cfg;
5114 int ability_match_count;
5116 char ability_match, idle_match, ack_match;
5118 u32 txconfig, rxconfig;
5119 #define ANEG_CFG_NP 0x00000080
5120 #define ANEG_CFG_ACK 0x00000040
5121 #define ANEG_CFG_RF2 0x00000020
5122 #define ANEG_CFG_RF1 0x00000010
5123 #define ANEG_CFG_PS2 0x00000001
5124 #define ANEG_CFG_PS1 0x00008000
5125 #define ANEG_CFG_HD 0x00004000
5126 #define ANEG_CFG_FD 0x00002000
5127 #define ANEG_CFG_INVAL 0x00001f06
5132 #define ANEG_TIMER_ENAB 2
5133 #define ANEG_FAILED -1
5135 #define ANEG_STATE_SETTLE_TIME 10000
5137 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5138 struct tg3_fiber_aneginfo *ap)
5141 unsigned long delta;
5145 if (ap->state == ANEG_STATE_UNKNOWN) {
5149 ap->ability_match_cfg = 0;
5150 ap->ability_match_count = 0;
5151 ap->ability_match = 0;
5157 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5158 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5160 if (rx_cfg_reg != ap->ability_match_cfg) {
5161 ap->ability_match_cfg = rx_cfg_reg;
5162 ap->ability_match = 0;
5163 ap->ability_match_count = 0;
5165 if (++ap->ability_match_count > 1) {
5166 ap->ability_match = 1;
5167 ap->ability_match_cfg = rx_cfg_reg;
5170 if (rx_cfg_reg & ANEG_CFG_ACK)
5178 ap->ability_match_cfg = 0;
5179 ap->ability_match_count = 0;
5180 ap->ability_match = 0;
5186 ap->rxconfig = rx_cfg_reg;
5189 switch (ap->state) {
5190 case ANEG_STATE_UNKNOWN:
5191 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5192 ap->state = ANEG_STATE_AN_ENABLE;
5195 case ANEG_STATE_AN_ENABLE:
5196 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5197 if (ap->flags & MR_AN_ENABLE) {
5200 ap->ability_match_cfg = 0;
5201 ap->ability_match_count = 0;
5202 ap->ability_match = 0;
5206 ap->state = ANEG_STATE_RESTART_INIT;
5208 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5212 case ANEG_STATE_RESTART_INIT:
5213 ap->link_time = ap->cur_time;
5214 ap->flags &= ~(MR_NP_LOADED);
5216 tw32(MAC_TX_AUTO_NEG, 0);
5217 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5218 tw32_f(MAC_MODE, tp->mac_mode);
5221 ret = ANEG_TIMER_ENAB;
5222 ap->state = ANEG_STATE_RESTART;
5225 case ANEG_STATE_RESTART:
5226 delta = ap->cur_time - ap->link_time;
5227 if (delta > ANEG_STATE_SETTLE_TIME)
5228 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5230 ret = ANEG_TIMER_ENAB;
5233 case ANEG_STATE_DISABLE_LINK_OK:
5237 case ANEG_STATE_ABILITY_DETECT_INIT:
5238 ap->flags &= ~(MR_TOGGLE_TX);
5239 ap->txconfig = ANEG_CFG_FD;
5240 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5241 if (flowctrl & ADVERTISE_1000XPAUSE)
5242 ap->txconfig |= ANEG_CFG_PS1;
5243 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5244 ap->txconfig |= ANEG_CFG_PS2;
5245 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5246 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5247 tw32_f(MAC_MODE, tp->mac_mode);
5250 ap->state = ANEG_STATE_ABILITY_DETECT;
5253 case ANEG_STATE_ABILITY_DETECT:
5254 if (ap->ability_match != 0 && ap->rxconfig != 0)
5255 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5258 case ANEG_STATE_ACK_DETECT_INIT:
5259 ap->txconfig |= ANEG_CFG_ACK;
5260 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5261 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5262 tw32_f(MAC_MODE, tp->mac_mode);
5265 ap->state = ANEG_STATE_ACK_DETECT;
5268 case ANEG_STATE_ACK_DETECT:
5269 if (ap->ack_match != 0) {
5270 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5271 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5272 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5274 ap->state = ANEG_STATE_AN_ENABLE;
5276 } else if (ap->ability_match != 0 &&
5277 ap->rxconfig == 0) {
5278 ap->state = ANEG_STATE_AN_ENABLE;
5282 case ANEG_STATE_COMPLETE_ACK_INIT:
5283 if (ap->rxconfig & ANEG_CFG_INVAL) {
5287 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5288 MR_LP_ADV_HALF_DUPLEX |
5289 MR_LP_ADV_SYM_PAUSE |
5290 MR_LP_ADV_ASYM_PAUSE |
5291 MR_LP_ADV_REMOTE_FAULT1 |
5292 MR_LP_ADV_REMOTE_FAULT2 |
5293 MR_LP_ADV_NEXT_PAGE |
5296 if (ap->rxconfig & ANEG_CFG_FD)
5297 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5298 if (ap->rxconfig & ANEG_CFG_HD)
5299 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5300 if (ap->rxconfig & ANEG_CFG_PS1)
5301 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5302 if (ap->rxconfig & ANEG_CFG_PS2)
5303 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5304 if (ap->rxconfig & ANEG_CFG_RF1)
5305 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5306 if (ap->rxconfig & ANEG_CFG_RF2)
5307 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5308 if (ap->rxconfig & ANEG_CFG_NP)
5309 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5311 ap->link_time = ap->cur_time;
5313 ap->flags ^= (MR_TOGGLE_TX);
5314 if (ap->rxconfig & 0x0008)
5315 ap->flags |= MR_TOGGLE_RX;
5316 if (ap->rxconfig & ANEG_CFG_NP)
5317 ap->flags |= MR_NP_RX;
5318 ap->flags |= MR_PAGE_RX;
5320 ap->state = ANEG_STATE_COMPLETE_ACK;
5321 ret = ANEG_TIMER_ENAB;
5324 case ANEG_STATE_COMPLETE_ACK:
5325 if (ap->ability_match != 0 &&
5326 ap->rxconfig == 0) {
5327 ap->state = ANEG_STATE_AN_ENABLE;
5330 delta = ap->cur_time - ap->link_time;
5331 if (delta > ANEG_STATE_SETTLE_TIME) {
5332 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5333 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5335 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5336 !(ap->flags & MR_NP_RX)) {
5337 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5345 case ANEG_STATE_IDLE_DETECT_INIT:
5346 ap->link_time = ap->cur_time;
5347 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5348 tw32_f(MAC_MODE, tp->mac_mode);
5351 ap->state = ANEG_STATE_IDLE_DETECT;
5352 ret = ANEG_TIMER_ENAB;
5355 case ANEG_STATE_IDLE_DETECT:
5356 if (ap->ability_match != 0 &&
5357 ap->rxconfig == 0) {
5358 ap->state = ANEG_STATE_AN_ENABLE;
5361 delta = ap->cur_time - ap->link_time;
5362 if (delta > ANEG_STATE_SETTLE_TIME) {
5363 /* XXX another gem from the Broadcom driver :( */
5364 ap->state = ANEG_STATE_LINK_OK;
5368 case ANEG_STATE_LINK_OK:
5369 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5373 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5374 /* ??? unimplemented */
5377 case ANEG_STATE_NEXT_PAGE_WAIT:
5378 /* ??? unimplemented */
5389 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5392 struct tg3_fiber_aneginfo aninfo;
5393 int status = ANEG_FAILED;
5397 tw32_f(MAC_TX_AUTO_NEG, 0);
5399 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5400 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5403 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5406 memset(&aninfo, 0, sizeof(aninfo));
5407 aninfo.flags |= MR_AN_ENABLE;
5408 aninfo.state = ANEG_STATE_UNKNOWN;
5409 aninfo.cur_time = 0;
5411 while (++tick < 195000) {
5412 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5413 if (status == ANEG_DONE || status == ANEG_FAILED)
5419 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5420 tw32_f(MAC_MODE, tp->mac_mode);
5423 *txflags = aninfo.txconfig;
5424 *rxflags = aninfo.flags;
5426 if (status == ANEG_DONE &&
5427 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5428 MR_LP_ADV_FULL_DUPLEX)))
5434 static void tg3_init_bcm8002(struct tg3 *tp)
5436 u32 mac_status = tr32(MAC_STATUS);
5439 /* Reset when initting first time or we have a link. */
5440 if (tg3_flag(tp, INIT_COMPLETE) &&
5441 !(mac_status & MAC_STATUS_PCS_SYNCED))
5444 /* Set PLL lock range. */
5445 tg3_writephy(tp, 0x16, 0x8007);
5448 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5450 /* Wait for reset to complete. */
5451 /* XXX schedule_timeout() ... */
5452 for (i = 0; i < 500; i++)
5455 /* Config mode; select PMA/Ch 1 regs. */
5456 tg3_writephy(tp, 0x10, 0x8411);
5458 /* Enable auto-lock and comdet, select txclk for tx. */
5459 tg3_writephy(tp, 0x11, 0x0a10);
5461 tg3_writephy(tp, 0x18, 0x00a0);
5462 tg3_writephy(tp, 0x16, 0x41ff);
5464 /* Assert and deassert POR. */
5465 tg3_writephy(tp, 0x13, 0x0400);
5467 tg3_writephy(tp, 0x13, 0x0000);
5469 tg3_writephy(tp, 0x11, 0x0a50);
5471 tg3_writephy(tp, 0x11, 0x0a10);
5473 /* Wait for signal to stabilize */
5474 /* XXX schedule_timeout() ... */
5475 for (i = 0; i < 15000; i++)
5478 /* Deselect the channel register so we can read the PHYID
5481 tg3_writephy(tp, 0x10, 0x8011);
5484 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5487 bool current_link_up;
5488 u32 sg_dig_ctrl, sg_dig_status;
5489 u32 serdes_cfg, expected_sg_dig_ctrl;
5490 int workaround, port_a;
5493 expected_sg_dig_ctrl = 0;
5496 current_link_up = false;
5498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5499 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5501 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5505 /* preserve bits 20-23 for voltage regulator */
5506 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5511 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5512 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5514 u32 val = serdes_cfg;
5520 tw32_f(MAC_SERDES_CFG, val);
5523 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5525 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5526 tg3_setup_flow_control(tp, 0, 0);
5527 current_link_up = true;
5532 /* Want auto-negotiation. */
5533 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5535 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5536 if (flowctrl & ADVERTISE_1000XPAUSE)
5537 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5538 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5539 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5541 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5542 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5543 tp->serdes_counter &&
5544 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5545 MAC_STATUS_RCVD_CFG)) ==
5546 MAC_STATUS_PCS_SYNCED)) {
5547 tp->serdes_counter--;
5548 current_link_up = true;
5553 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5554 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5556 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5558 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5559 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5560 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5561 MAC_STATUS_SIGNAL_DET)) {
5562 sg_dig_status = tr32(SG_DIG_STATUS);
5563 mac_status = tr32(MAC_STATUS);
5565 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5566 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5567 u32 local_adv = 0, remote_adv = 0;
5569 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5570 local_adv |= ADVERTISE_1000XPAUSE;
5571 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5572 local_adv |= ADVERTISE_1000XPSE_ASYM;
5574 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5575 remote_adv |= LPA_1000XPAUSE;
5576 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5577 remote_adv |= LPA_1000XPAUSE_ASYM;
5579 tp->link_config.rmt_adv =
5580 mii_adv_to_ethtool_adv_x(remote_adv);
5582 tg3_setup_flow_control(tp, local_adv, remote_adv);
5583 current_link_up = true;
5584 tp->serdes_counter = 0;
5585 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5586 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5587 if (tp->serdes_counter)
5588 tp->serdes_counter--;
5591 u32 val = serdes_cfg;
5598 tw32_f(MAC_SERDES_CFG, val);
5601 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 /* Link parallel detection - link is up */
5605 /* only if we have PCS_SYNC and not */
5606 /* receiving config code words */
5607 mac_status = tr32(MAC_STATUS);
5608 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5609 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5610 tg3_setup_flow_control(tp, 0, 0);
5611 current_link_up = true;
5613 TG3_PHYFLG_PARALLEL_DETECT;
5614 tp->serdes_counter =
5615 SERDES_PARALLEL_DET_TIMEOUT;
5617 goto restart_autoneg;
5621 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5622 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 return current_link_up;
5629 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5631 bool current_link_up = false;
5633 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5637 u32 txflags, rxflags;
5640 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5641 u32 local_adv = 0, remote_adv = 0;
5643 if (txflags & ANEG_CFG_PS1)
5644 local_adv |= ADVERTISE_1000XPAUSE;
5645 if (txflags & ANEG_CFG_PS2)
5646 local_adv |= ADVERTISE_1000XPSE_ASYM;
5648 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5649 remote_adv |= LPA_1000XPAUSE;
5650 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5651 remote_adv |= LPA_1000XPAUSE_ASYM;
5653 tp->link_config.rmt_adv =
5654 mii_adv_to_ethtool_adv_x(remote_adv);
5656 tg3_setup_flow_control(tp, local_adv, remote_adv);
5658 current_link_up = true;
5660 for (i = 0; i < 30; i++) {
5663 (MAC_STATUS_SYNC_CHANGED |
5664 MAC_STATUS_CFG_CHANGED));
5666 if ((tr32(MAC_STATUS) &
5667 (MAC_STATUS_SYNC_CHANGED |
5668 MAC_STATUS_CFG_CHANGED)) == 0)
5672 mac_status = tr32(MAC_STATUS);
5673 if (!current_link_up &&
5674 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5675 !(mac_status & MAC_STATUS_RCVD_CFG))
5676 current_link_up = true;
5678 tg3_setup_flow_control(tp, 0, 0);
5680 /* Forcing 1000FD link up. */
5681 current_link_up = true;
5683 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 tw32_f(MAC_MODE, tp->mac_mode);
5691 return current_link_up;
5694 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 u16 orig_active_speed;
5698 u8 orig_active_duplex;
5700 bool current_link_up;
5703 orig_pause_cfg = tp->link_config.active_flowctrl;
5704 orig_active_speed = tp->link_config.active_speed;
5705 orig_active_duplex = tp->link_config.active_duplex;
5707 if (!tg3_flag(tp, HW_AUTONEG) &&
5709 tg3_flag(tp, INIT_COMPLETE)) {
5710 mac_status = tr32(MAC_STATUS);
5711 mac_status &= (MAC_STATUS_PCS_SYNCED |
5712 MAC_STATUS_SIGNAL_DET |
5713 MAC_STATUS_CFG_CHANGED |
5714 MAC_STATUS_RCVD_CFG);
5715 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5716 MAC_STATUS_SIGNAL_DET)) {
5717 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5718 MAC_STATUS_CFG_CHANGED));
5723 tw32_f(MAC_TX_AUTO_NEG, 0);
5725 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5726 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5727 tw32_f(MAC_MODE, tp->mac_mode);
5730 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5731 tg3_init_bcm8002(tp);
5733 /* Enable link change event even when serdes polling. */
5734 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 current_link_up = false;
5738 tp->link_config.rmt_adv = 0;
5739 mac_status = tr32(MAC_STATUS);
5741 if (tg3_flag(tp, HW_AUTONEG))
5742 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746 tp->napi[0].hw_status->status =
5747 (SD_STATUS_UPDATED |
5748 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750 for (i = 0; i < 100; i++) {
5751 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5752 MAC_STATUS_CFG_CHANGED));
5754 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5755 MAC_STATUS_CFG_CHANGED |
5756 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5760 mac_status = tr32(MAC_STATUS);
5761 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5762 current_link_up = false;
5763 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5764 tp->serdes_counter == 0) {
5765 tw32_f(MAC_MODE, (tp->mac_mode |
5766 MAC_MODE_SEND_CONFIGS));
5768 tw32_f(MAC_MODE, tp->mac_mode);
5772 if (current_link_up) {
5773 tp->link_config.active_speed = SPEED_1000;
5774 tp->link_config.active_duplex = DUPLEX_FULL;
5775 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5776 LED_CTRL_LNKLED_OVERRIDE |
5777 LED_CTRL_1000MBPS_ON));
5779 tp->link_config.active_speed = SPEED_UNKNOWN;
5780 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5781 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5782 LED_CTRL_LNKLED_OVERRIDE |
5783 LED_CTRL_TRAFFIC_OVERRIDE));
5786 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5787 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5788 if (orig_pause_cfg != now_pause_cfg ||
5789 orig_active_speed != tp->link_config.active_speed ||
5790 orig_active_duplex != tp->link_config.active_duplex)
5791 tg3_link_report(tp);
5797 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5801 u16 current_speed = SPEED_UNKNOWN;
5802 u8 current_duplex = DUPLEX_UNKNOWN;
5803 bool current_link_up = false;
5804 u32 local_adv, remote_adv, sgsr;
5806 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5807 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5808 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5809 (sgsr & SERDES_TG3_SGMII_MODE)) {
5814 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5817 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819 current_link_up = true;
5820 if (sgsr & SERDES_TG3_SPEED_1000) {
5821 current_speed = SPEED_1000;
5822 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5823 } else if (sgsr & SERDES_TG3_SPEED_100) {
5824 current_speed = SPEED_100;
5825 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 current_speed = SPEED_10;
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5831 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5832 current_duplex = DUPLEX_FULL;
5834 current_duplex = DUPLEX_HALF;
5837 tw32_f(MAC_MODE, tp->mac_mode);
5840 tg3_clear_mac_status(tp);
5842 goto fiber_setup_done;
5845 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5846 tw32_f(MAC_MODE, tp->mac_mode);
5849 tg3_clear_mac_status(tp);
5854 tp->link_config.rmt_adv = 0;
5856 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5857 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5859 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5860 bmsr |= BMSR_LSTATUS;
5862 bmsr &= ~BMSR_LSTATUS;
5865 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5868 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5869 /* do nothing, just check for link up at the end */
5870 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5873 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5874 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5875 ADVERTISE_1000XPAUSE |
5876 ADVERTISE_1000XPSE_ASYM |
5879 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5880 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5883 tg3_writephy(tp, MII_ADVERTISE, newadv);
5884 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5885 tg3_writephy(tp, MII_BMCR, bmcr);
5887 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5888 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5889 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5896 bmcr &= ~BMCR_SPEED1000;
5897 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899 if (tp->link_config.duplex == DUPLEX_FULL)
5900 new_bmcr |= BMCR_FULLDPLX;
5902 if (new_bmcr != bmcr) {
5903 /* BMCR_SPEED1000 is a reserved bit that needs
5904 * to be set on write.
5906 new_bmcr |= BMCR_SPEED1000;
5908 /* Force a linkdown */
5912 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5913 adv &= ~(ADVERTISE_1000XFULL |
5914 ADVERTISE_1000XHALF |
5916 tg3_writephy(tp, MII_ADVERTISE, adv);
5917 tg3_writephy(tp, MII_BMCR, bmcr |
5921 tg3_carrier_off(tp);
5923 tg3_writephy(tp, MII_BMCR, new_bmcr);
5925 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5926 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5928 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5929 bmsr |= BMSR_LSTATUS;
5931 bmsr &= ~BMSR_LSTATUS;
5933 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5937 if (bmsr & BMSR_LSTATUS) {
5938 current_speed = SPEED_1000;
5939 current_link_up = true;
5940 if (bmcr & BMCR_FULLDPLX)
5941 current_duplex = DUPLEX_FULL;
5943 current_duplex = DUPLEX_HALF;
5948 if (bmcr & BMCR_ANENABLE) {
5951 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5952 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5953 common = local_adv & remote_adv;
5954 if (common & (ADVERTISE_1000XHALF |
5955 ADVERTISE_1000XFULL)) {
5956 if (common & ADVERTISE_1000XFULL)
5957 current_duplex = DUPLEX_FULL;
5959 current_duplex = DUPLEX_HALF;
5961 tp->link_config.rmt_adv =
5962 mii_adv_to_ethtool_adv_x(remote_adv);
5963 } else if (!tg3_flag(tp, 5780_CLASS)) {
5964 /* Link is up via parallel detect */
5966 current_link_up = false;
5972 if (current_link_up && current_duplex == DUPLEX_FULL)
5973 tg3_setup_flow_control(tp, local_adv, remote_adv);
5975 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5976 if (tp->link_config.active_duplex == DUPLEX_HALF)
5977 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979 tw32_f(MAC_MODE, tp->mac_mode);
5982 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984 tp->link_config.active_speed = current_speed;
5985 tp->link_config.active_duplex = current_duplex;
5987 tg3_test_and_report_link_chg(tp, current_link_up);
5991 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 if (tp->serdes_counter) {
5994 /* Give autoneg time to complete. */
5995 tp->serdes_counter--;
6000 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6003 tg3_readphy(tp, MII_BMCR, &bmcr);
6004 if (bmcr & BMCR_ANENABLE) {
6007 /* Select shadow register 0x1f */
6008 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6009 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011 /* Select expansion interrupt status register */
6012 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6013 MII_TG3_DSP_EXP1_INT_STAT);
6014 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6018 /* We have signal detect and not receiving
6019 * config code words, link is up by parallel
6023 bmcr &= ~BMCR_ANENABLE;
6024 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6025 tg3_writephy(tp, MII_BMCR, bmcr);
6026 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6029 } else if (tp->link_up &&
6030 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6031 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6034 /* Select expansion interrupt status register */
6035 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6036 MII_TG3_DSP_EXP1_INT_STAT);
6037 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6041 /* Config code words received, turn on autoneg. */
6042 tg3_readphy(tp, MII_BMCR, &bmcr);
6043 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6051 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6056 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6057 err = tg3_setup_fiber_phy(tp, force_reset);
6058 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6059 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061 err = tg3_setup_copper_phy(tp, force_reset);
6063 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6066 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6067 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6074 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6075 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6076 tw32(GRC_MISC_CFG, val);
6079 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6080 (6 << TX_LENGTHS_IPG_SHIFT);
6081 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6082 tg3_asic_rev(tp) == ASIC_REV_5762)
6083 val |= tr32(MAC_TX_LENGTHS) &
6084 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6085 TX_LENGTHS_CNT_DWN_VAL_MSK);
6087 if (tp->link_config.active_speed == SPEED_1000 &&
6088 tp->link_config.active_duplex == DUPLEX_HALF)
6089 tw32(MAC_TX_LENGTHS, val |
6090 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 tw32(MAC_TX_LENGTHS, val |
6093 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095 if (!tg3_flag(tp, 5705_PLUS)) {
6097 tw32(HOSTCC_STAT_COAL_TICKS,
6098 tp->coal.stats_block_coalesce_usecs);
6100 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6104 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6105 val = tr32(PCIE_PWR_MGMT_THRESH);
6107 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6110 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6111 tw32(PCIE_PWR_MGMT_THRESH, val);
6117 /* tp->lock must be held */
6118 static u64 tg3_refclk_read(struct tg3 *tp)
6120 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6121 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6124 /* tp->lock must be held */
6125 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6127 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6129 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6130 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6131 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6132 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6135 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6136 static inline void tg3_full_unlock(struct tg3 *tp);
6137 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6139 struct tg3 *tp = netdev_priv(dev);
6141 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6142 SOF_TIMESTAMPING_RX_SOFTWARE |
6143 SOF_TIMESTAMPING_SOFTWARE;
6145 if (tg3_flag(tp, PTP_CAPABLE)) {
6146 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6147 SOF_TIMESTAMPING_RX_HARDWARE |
6148 SOF_TIMESTAMPING_RAW_HARDWARE;
6152 info->phc_index = ptp_clock_index(tp->ptp_clock);
6154 info->phc_index = -1;
6156 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6158 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6159 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6160 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6161 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6165 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6167 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6168 bool neg_adj = false;
6176 /* Frequency adjustment is performed using hardware with a 24 bit
6177 * accumulator and a programmable correction value. On each clk, the
6178 * correction value gets added to the accumulator and when it
6179 * overflows, the time counter is incremented/decremented.
6181 * So conversion from ppb to correction value is
6182 * ppb * (1 << 24) / 1000000000
6184 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6185 TG3_EAV_REF_CLK_CORRECT_MASK;
6187 tg3_full_lock(tp, 0);
6190 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6191 TG3_EAV_REF_CLK_CORRECT_EN |
6192 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6194 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6196 tg3_full_unlock(tp);
6201 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6203 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6205 tg3_full_lock(tp, 0);
6206 tp->ptp_adjust += delta;
6207 tg3_full_unlock(tp);
6212 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6215 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6217 tg3_full_lock(tp, 0);
6218 ns = tg3_refclk_read(tp);
6219 ns += tp->ptp_adjust;
6220 tg3_full_unlock(tp);
6222 *ts = ns_to_timespec64(ns);
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228 const struct timespec64 *ts)
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6233 ns = timespec64_to_ns(ts);
6235 tg3_full_lock(tp, 0);
6236 tg3_refclk_write(tp, ns);
6238 tg3_full_unlock(tp);
6243 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6244 struct ptp_clock_request *rq, int on)
6246 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6251 case PTP_CLK_REQ_PEROUT:
6252 if (rq->perout.index != 0)
6255 tg3_full_lock(tp, 0);
6256 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6262 nsec = rq->perout.start.sec * 1000000000ULL +
6263 rq->perout.start.nsec;
6265 if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 netdev_warn(tp->dev,
6267 "Device supports only a one-shot timesync output, period must be 0\n");
6272 if (nsec & (1ULL << 63)) {
6273 netdev_warn(tp->dev,
6274 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6279 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 tw32(TG3_EAV_WATCHDOG0_MSB,
6281 TG3_EAV_WATCHDOG0_EN |
6282 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6284 tw32(TG3_EAV_REF_CLCK_CTL,
6285 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6287 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6292 tg3_full_unlock(tp);
6302 static const struct ptp_clock_info tg3_ptp_caps = {
6303 .owner = THIS_MODULE,
6304 .name = "tg3 clock",
6305 .max_adj = 250000000,
6311 .adjfreq = tg3_ptp_adjfreq,
6312 .adjtime = tg3_ptp_adjtime,
6313 .gettime64 = tg3_ptp_gettime,
6314 .settime64 = tg3_ptp_settime,
6315 .enable = tg3_ptp_enable,
6318 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6319 struct skb_shared_hwtstamps *timestamp)
6321 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6322 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6326 /* tp->lock must be held */
6327 static void tg3_ptp_init(struct tg3 *tp)
6329 if (!tg3_flag(tp, PTP_CAPABLE))
6332 /* Initialize the hardware clock to the system time. */
6333 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6335 tp->ptp_info = tg3_ptp_caps;
6338 /* tp->lock must be held */
6339 static void tg3_ptp_resume(struct tg3 *tp)
6341 if (!tg3_flag(tp, PTP_CAPABLE))
6344 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6348 static void tg3_ptp_fini(struct tg3 *tp)
6350 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6353 ptp_clock_unregister(tp->ptp_clock);
6354 tp->ptp_clock = NULL;
6358 static inline int tg3_irq_sync(struct tg3 *tp)
6360 return tp->irq_sync;
6363 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6367 dst = (u32 *)((u8 *)dst + off);
6368 for (i = 0; i < len; i += sizeof(u32))
6369 *dst++ = tr32(off + i);
6372 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6374 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6375 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6376 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6377 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6378 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6379 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6380 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6381 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6382 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6383 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6384 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6385 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6386 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6387 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6388 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6389 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6390 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6391 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6392 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6394 if (tg3_flag(tp, SUPPORT_MSIX))
6395 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6397 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6398 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6399 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6400 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6401 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6402 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6403 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6404 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6406 if (!tg3_flag(tp, 5705_PLUS)) {
6407 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6408 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6409 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6412 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6413 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6414 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6415 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6416 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6418 if (tg3_flag(tp, NVRAM))
6419 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6422 static void tg3_dump_state(struct tg3 *tp)
6427 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6431 if (tg3_flag(tp, PCI_EXPRESS)) {
6432 /* Read up to but not including private PCI registers */
6433 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6434 regs[i / sizeof(u32)] = tr32(i);
6436 tg3_dump_legacy_regs(tp, regs);
6438 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6439 if (!regs[i + 0] && !regs[i + 1] &&
6440 !regs[i + 2] && !regs[i + 3])
6443 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6445 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6450 for (i = 0; i < tp->irq_cnt; i++) {
6451 struct tg3_napi *tnapi = &tp->napi[i];
6453 /* SW status block */
6455 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6457 tnapi->hw_status->status,
6458 tnapi->hw_status->status_tag,
6459 tnapi->hw_status->rx_jumbo_consumer,
6460 tnapi->hw_status->rx_consumer,
6461 tnapi->hw_status->rx_mini_consumer,
6462 tnapi->hw_status->idx[0].rx_producer,
6463 tnapi->hw_status->idx[0].tx_consumer);
6466 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6468 tnapi->last_tag, tnapi->last_irq_tag,
6469 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6471 tnapi->prodring.rx_std_prod_idx,
6472 tnapi->prodring.rx_std_cons_idx,
6473 tnapi->prodring.rx_jmb_prod_idx,
6474 tnapi->prodring.rx_jmb_cons_idx);
6478 /* This is called whenever we suspect that the system chipset is re-
6479 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6480 * is bogus tx completions. We try to recover by setting the
6481 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6484 static void tg3_tx_recover(struct tg3 *tp)
6486 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6487 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6489 netdev_warn(tp->dev,
6490 "The system may be re-ordering memory-mapped I/O "
6491 "cycles to the network device, attempting to recover. "
6492 "Please report the problem to the driver maintainer "
6493 "and include system chipset information.\n");
6495 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6498 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6500 /* Tell compiler to fetch tx indices from memory. */
6502 return tnapi->tx_pending -
6503 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6506 /* Tigon3 never reports partial packet sends. So we do not
6507 * need special logic to handle SKBs that have not had all
6508 * of their frags sent yet, like SunGEM does.
6510 static void tg3_tx(struct tg3_napi *tnapi)
6512 struct tg3 *tp = tnapi->tp;
6513 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6514 u32 sw_idx = tnapi->tx_cons;
6515 struct netdev_queue *txq;
6516 int index = tnapi - tp->napi;
6517 unsigned int pkts_compl = 0, bytes_compl = 0;
6519 if (tg3_flag(tp, ENABLE_TSS))
6522 txq = netdev_get_tx_queue(tp->dev, index);
6524 while (sw_idx != hw_idx) {
6525 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6526 struct sk_buff *skb = ri->skb;
6529 if (unlikely(skb == NULL)) {
6534 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6535 struct skb_shared_hwtstamps timestamp;
6536 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6537 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6539 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6541 skb_tstamp_tx(skb, ×tamp);
6544 pci_unmap_single(tp->pdev,
6545 dma_unmap_addr(ri, mapping),
6551 while (ri->fragmented) {
6552 ri->fragmented = false;
6553 sw_idx = NEXT_TX(sw_idx);
6554 ri = &tnapi->tx_buffers[sw_idx];
6557 sw_idx = NEXT_TX(sw_idx);
6559 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6560 ri = &tnapi->tx_buffers[sw_idx];
6561 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6564 pci_unmap_page(tp->pdev,
6565 dma_unmap_addr(ri, mapping),
6566 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6569 while (ri->fragmented) {
6570 ri->fragmented = false;
6571 sw_idx = NEXT_TX(sw_idx);
6572 ri = &tnapi->tx_buffers[sw_idx];
6575 sw_idx = NEXT_TX(sw_idx);
6579 bytes_compl += skb->len;
6581 dev_consume_skb_any(skb);
6583 if (unlikely(tx_bug)) {
6589 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6591 tnapi->tx_cons = sw_idx;
6593 /* Need to make the tx_cons update visible to tg3_start_xmit()
6594 * before checking for netif_queue_stopped(). Without the
6595 * memory barrier, there is a small possibility that tg3_start_xmit()
6596 * will miss it and cause the queue to be stopped forever.
6600 if (unlikely(netif_tx_queue_stopped(txq) &&
6601 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6602 __netif_tx_lock(txq, smp_processor_id());
6603 if (netif_tx_queue_stopped(txq) &&
6604 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6605 netif_tx_wake_queue(txq);
6606 __netif_tx_unlock(txq);
6610 static void tg3_frag_free(bool is_frag, void *data)
6613 skb_free_frag(data);
6618 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6620 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6621 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6626 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6627 map_sz, PCI_DMA_FROMDEVICE);
6628 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6633 /* Returns size of skb allocated or < 0 on error.
6635 * We only need to fill in the address because the other members
6636 * of the RX descriptor are invariant, see tg3_init_rings.
6638 * Note the purposeful assymetry of cpu vs. chip accesses. For
6639 * posting buffers we only dirty the first cache line of the RX
6640 * descriptor (containing the address). Whereas for the RX status
6641 * buffers the cpu only reads the last cacheline of the RX descriptor
6642 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6644 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6645 u32 opaque_key, u32 dest_idx_unmasked,
6646 unsigned int *frag_size)
6648 struct tg3_rx_buffer_desc *desc;
6649 struct ring_info *map;
6652 int skb_size, data_size, dest_idx;
6654 switch (opaque_key) {
6655 case RXD_OPAQUE_RING_STD:
6656 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6657 desc = &tpr->rx_std[dest_idx];
6658 map = &tpr->rx_std_buffers[dest_idx];
6659 data_size = tp->rx_pkt_map_sz;
6662 case RXD_OPAQUE_RING_JUMBO:
6663 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6664 desc = &tpr->rx_jmb[dest_idx].std;
6665 map = &tpr->rx_jmb_buffers[dest_idx];
6666 data_size = TG3_RX_JMB_MAP_SZ;
6673 /* Do not overwrite any of the map or rp information
6674 * until we are sure we can commit to a new buffer.
6676 * Callers depend upon this behavior and assume that
6677 * we leave everything unchanged if we fail.
6679 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6680 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6681 if (skb_size <= PAGE_SIZE) {
6682 data = netdev_alloc_frag(skb_size);
6683 *frag_size = skb_size;
6685 data = kmalloc(skb_size, GFP_ATOMIC);
6691 mapping = pci_map_single(tp->pdev,
6692 data + TG3_RX_OFFSET(tp),
6694 PCI_DMA_FROMDEVICE);
6695 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6696 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6701 dma_unmap_addr_set(map, mapping, mapping);
6703 desc->addr_hi = ((u64)mapping >> 32);
6704 desc->addr_lo = ((u64)mapping & 0xffffffff);
6709 /* We only need to move over in the address because the other
6710 * members of the RX descriptor are invariant. See notes above
6711 * tg3_alloc_rx_data for full details.
6713 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6714 struct tg3_rx_prodring_set *dpr,
6715 u32 opaque_key, int src_idx,
6716 u32 dest_idx_unmasked)
6718 struct tg3 *tp = tnapi->tp;
6719 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6720 struct ring_info *src_map, *dest_map;
6721 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6724 switch (opaque_key) {
6725 case RXD_OPAQUE_RING_STD:
6726 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6727 dest_desc = &dpr->rx_std[dest_idx];
6728 dest_map = &dpr->rx_std_buffers[dest_idx];
6729 src_desc = &spr->rx_std[src_idx];
6730 src_map = &spr->rx_std_buffers[src_idx];
6733 case RXD_OPAQUE_RING_JUMBO:
6734 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6735 dest_desc = &dpr->rx_jmb[dest_idx].std;
6736 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6737 src_desc = &spr->rx_jmb[src_idx].std;
6738 src_map = &spr->rx_jmb_buffers[src_idx];
6745 dest_map->data = src_map->data;
6746 dma_unmap_addr_set(dest_map, mapping,
6747 dma_unmap_addr(src_map, mapping));
6748 dest_desc->addr_hi = src_desc->addr_hi;
6749 dest_desc->addr_lo = src_desc->addr_lo;
6751 /* Ensure that the update to the skb happens after the physical
6752 * addresses have been transferred to the new BD location.
6756 src_map->data = NULL;
6759 /* The RX ring scheme is composed of multiple rings which post fresh
6760 * buffers to the chip, and one special ring the chip uses to report
6761 * status back to the host.
6763 * The special ring reports the status of received packets to the
6764 * host. The chip does not write into the original descriptor the
6765 * RX buffer was obtained from. The chip simply takes the original
6766 * descriptor as provided by the host, updates the status and length
6767 * field, then writes this into the next status ring entry.
6769 * Each ring the host uses to post buffers to the chip is described
6770 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6771 * it is first placed into the on-chip ram. When the packet's length
6772 * is known, it walks down the TG3_BDINFO entries to select the ring.
6773 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6774 * which is within the range of the new packet's length is chosen.
6776 * The "separate ring for rx status" scheme may sound queer, but it makes
6777 * sense from a cache coherency perspective. If only the host writes
6778 * to the buffer post rings, and only the chip writes to the rx status
6779 * rings, then cache lines never move beyond shared-modified state.
6780 * If both the host and chip were to write into the same ring, cache line
6781 * eviction could occur since both entities want it in an exclusive state.
6783 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6785 struct tg3 *tp = tnapi->tp;
6786 u32 work_mask, rx_std_posted = 0;
6787 u32 std_prod_idx, jmb_prod_idx;
6788 u32 sw_idx = tnapi->rx_rcb_ptr;
6791 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6793 hw_idx = *(tnapi->rx_rcb_prod_idx);
6795 * We need to order the read of hw_idx and the read of
6796 * the opaque cookie.
6801 std_prod_idx = tpr->rx_std_prod_idx;
6802 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6803 while (sw_idx != hw_idx && budget > 0) {
6804 struct ring_info *ri;
6805 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6807 struct sk_buff *skb;
6808 dma_addr_t dma_addr;
6809 u32 opaque_key, desc_idx, *post_ptr;
6813 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6814 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6815 if (opaque_key == RXD_OPAQUE_RING_STD) {
6816 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6817 dma_addr = dma_unmap_addr(ri, mapping);
6819 post_ptr = &std_prod_idx;
6821 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6822 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6823 dma_addr = dma_unmap_addr(ri, mapping);
6825 post_ptr = &jmb_prod_idx;
6827 goto next_pkt_nopost;
6829 work_mask |= opaque_key;
6831 if (desc->err_vlan & RXD_ERR_MASK) {
6833 tg3_recycle_rx(tnapi, tpr, opaque_key,
6834 desc_idx, *post_ptr);
6836 /* Other statistics kept track of by card. */
6841 prefetch(data + TG3_RX_OFFSET(tp));
6842 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6845 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6846 RXD_FLAG_PTPSTAT_PTPV1 ||
6847 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6848 RXD_FLAG_PTPSTAT_PTPV2) {
6849 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6850 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6853 if (len > TG3_RX_COPY_THRESH(tp)) {
6855 unsigned int frag_size;
6857 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6858 *post_ptr, &frag_size);
6862 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6863 PCI_DMA_FROMDEVICE);
6865 /* Ensure that the update to the data happens
6866 * after the usage of the old DMA mapping.
6872 skb = build_skb(data, frag_size);
6874 tg3_frag_free(frag_size != 0, data);
6875 goto drop_it_no_recycle;
6877 skb_reserve(skb, TG3_RX_OFFSET(tp));
6879 tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 desc_idx, *post_ptr);
6882 skb = netdev_alloc_skb(tp->dev,
6883 len + TG3_RAW_IP_ALIGN);
6885 goto drop_it_no_recycle;
6887 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6888 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6890 data + TG3_RX_OFFSET(tp),
6892 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6897 tg3_hwclock_to_timestamp(tp, tstamp,
6898 skb_hwtstamps(skb));
6900 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6901 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6902 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6903 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6904 skb->ip_summed = CHECKSUM_UNNECESSARY;
6906 skb_checksum_none_assert(skb);
6908 skb->protocol = eth_type_trans(skb, tp->dev);
6910 if (len > (tp->dev->mtu + ETH_HLEN) &&
6911 skb->protocol != htons(ETH_P_8021Q) &&
6912 skb->protocol != htons(ETH_P_8021AD)) {
6913 dev_kfree_skb_any(skb);
6914 goto drop_it_no_recycle;
6917 if (desc->type_flags & RXD_FLAG_VLAN &&
6918 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6919 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6920 desc->err_vlan & RXD_VLAN_MASK);
6922 napi_gro_receive(&tnapi->napi, skb);
6930 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6931 tpr->rx_std_prod_idx = std_prod_idx &
6932 tp->rx_std_ring_mask;
6933 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6934 tpr->rx_std_prod_idx);
6935 work_mask &= ~RXD_OPAQUE_RING_STD;
6940 sw_idx &= tp->rx_ret_ring_mask;
6942 /* Refresh hw_idx to see if there is new work */
6943 if (sw_idx == hw_idx) {
6944 hw_idx = *(tnapi->rx_rcb_prod_idx);
6949 /* ACK the status ring. */
6950 tnapi->rx_rcb_ptr = sw_idx;
6951 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6953 /* Refill RX ring(s). */
6954 if (!tg3_flag(tp, ENABLE_RSS)) {
6955 /* Sync BD data before updating mailbox */
6958 if (work_mask & RXD_OPAQUE_RING_STD) {
6959 tpr->rx_std_prod_idx = std_prod_idx &
6960 tp->rx_std_ring_mask;
6961 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6962 tpr->rx_std_prod_idx);
6964 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6965 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6966 tp->rx_jmb_ring_mask;
6967 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6968 tpr->rx_jmb_prod_idx);
6971 } else if (work_mask) {
6972 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6973 * updated before the producer indices can be updated.
6977 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6978 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6980 if (tnapi != &tp->napi[1]) {
6981 tp->rx_refill = true;
6982 napi_schedule(&tp->napi[1].napi);
6989 static void tg3_poll_link(struct tg3 *tp)
6991 /* handle link change and other phy events */
6992 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6993 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6995 if (sblk->status & SD_STATUS_LINK_CHG) {
6996 sblk->status = SD_STATUS_UPDATED |
6997 (sblk->status & ~SD_STATUS_LINK_CHG);
6998 spin_lock(&tp->lock);
6999 if (tg3_flag(tp, USE_PHYLIB)) {
7001 (MAC_STATUS_SYNC_CHANGED |
7002 MAC_STATUS_CFG_CHANGED |
7003 MAC_STATUS_MI_COMPLETION |
7004 MAC_STATUS_LNKSTATE_CHANGED));
7007 tg3_setup_phy(tp, false);
7008 spin_unlock(&tp->lock);
7013 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7014 struct tg3_rx_prodring_set *dpr,
7015 struct tg3_rx_prodring_set *spr)
7017 u32 si, di, cpycnt, src_prod_idx;
7021 src_prod_idx = spr->rx_std_prod_idx;
7023 /* Make sure updates to the rx_std_buffers[] entries and the
7024 * standard producer index are seen in the correct order.
7028 if (spr->rx_std_cons_idx == src_prod_idx)
7031 if (spr->rx_std_cons_idx < src_prod_idx)
7032 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7034 cpycnt = tp->rx_std_ring_mask + 1 -
7035 spr->rx_std_cons_idx;
7037 cpycnt = min(cpycnt,
7038 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7040 si = spr->rx_std_cons_idx;
7041 di = dpr->rx_std_prod_idx;
7043 for (i = di; i < di + cpycnt; i++) {
7044 if (dpr->rx_std_buffers[i].data) {
7054 /* Ensure that updates to the rx_std_buffers ring and the
7055 * shadowed hardware producer ring from tg3_recycle_skb() are
7056 * ordered correctly WRT the skb check above.
7060 memcpy(&dpr->rx_std_buffers[di],
7061 &spr->rx_std_buffers[si],
7062 cpycnt * sizeof(struct ring_info));
7064 for (i = 0; i < cpycnt; i++, di++, si++) {
7065 struct tg3_rx_buffer_desc *sbd, *dbd;
7066 sbd = &spr->rx_std[si];
7067 dbd = &dpr->rx_std[di];
7068 dbd->addr_hi = sbd->addr_hi;
7069 dbd->addr_lo = sbd->addr_lo;
7072 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7073 tp->rx_std_ring_mask;
7074 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7075 tp->rx_std_ring_mask;
7079 src_prod_idx = spr->rx_jmb_prod_idx;
7081 /* Make sure updates to the rx_jmb_buffers[] entries and
7082 * the jumbo producer index are seen in the correct order.
7086 if (spr->rx_jmb_cons_idx == src_prod_idx)
7089 if (spr->rx_jmb_cons_idx < src_prod_idx)
7090 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7092 cpycnt = tp->rx_jmb_ring_mask + 1 -
7093 spr->rx_jmb_cons_idx;
7095 cpycnt = min(cpycnt,
7096 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7098 si = spr->rx_jmb_cons_idx;
7099 di = dpr->rx_jmb_prod_idx;
7101 for (i = di; i < di + cpycnt; i++) {
7102 if (dpr->rx_jmb_buffers[i].data) {
7112 /* Ensure that updates to the rx_jmb_buffers ring and the
7113 * shadowed hardware producer ring from tg3_recycle_skb() are
7114 * ordered correctly WRT the skb check above.
7118 memcpy(&dpr->rx_jmb_buffers[di],
7119 &spr->rx_jmb_buffers[si],
7120 cpycnt * sizeof(struct ring_info));
7122 for (i = 0; i < cpycnt; i++, di++, si++) {
7123 struct tg3_rx_buffer_desc *sbd, *dbd;
7124 sbd = &spr->rx_jmb[si].std;
7125 dbd = &dpr->rx_jmb[di].std;
7126 dbd->addr_hi = sbd->addr_hi;
7127 dbd->addr_lo = sbd->addr_lo;
7130 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7131 tp->rx_jmb_ring_mask;
7132 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7133 tp->rx_jmb_ring_mask;
7139 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7141 struct tg3 *tp = tnapi->tp;
7143 /* run TX completion thread */
7144 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7146 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7150 if (!tnapi->rx_rcb_prod_idx)
7153 /* run RX thread, within the bounds set by NAPI.
7154 * All RX "locking" is done by ensuring outside
7155 * code synchronizes with tg3->napi.poll()
7157 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7158 work_done += tg3_rx(tnapi, budget - work_done);
7160 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7161 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7163 u32 std_prod_idx = dpr->rx_std_prod_idx;
7164 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7166 tp->rx_refill = false;
7167 for (i = 1; i <= tp->rxq_cnt; i++)
7168 err |= tg3_rx_prodring_xfer(tp, dpr,
7169 &tp->napi[i].prodring);
7173 if (std_prod_idx != dpr->rx_std_prod_idx)
7174 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7175 dpr->rx_std_prod_idx);
7177 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7178 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7179 dpr->rx_jmb_prod_idx);
7184 tw32_f(HOSTCC_MODE, tp->coal_now);
7190 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7192 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7193 schedule_work(&tp->reset_task);
7196 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7198 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7199 cancel_work_sync(&tp->reset_task);
7200 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7203 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7205 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7206 struct tg3 *tp = tnapi->tp;
7208 struct tg3_hw_status *sblk = tnapi->hw_status;
7211 work_done = tg3_poll_work(tnapi, work_done, budget);
7213 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7216 if (unlikely(work_done >= budget))
7219 /* tp->last_tag is used in tg3_int_reenable() below
7220 * to tell the hw how much work has been processed,
7221 * so we must read it before checking for more work.
7223 tnapi->last_tag = sblk->status_tag;
7224 tnapi->last_irq_tag = tnapi->last_tag;
7227 /* check for RX/TX work to do */
7228 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7229 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7231 /* This test here is not race free, but will reduce
7232 * the number of interrupts by looping again.
7234 if (tnapi == &tp->napi[1] && tp->rx_refill)
7237 napi_complete_done(napi, work_done);
7238 /* Reenable interrupts. */
7239 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7241 /* This test here is synchronized by napi_schedule()
7242 * and napi_complete() to close the race condition.
7244 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7245 tw32(HOSTCC_MODE, tp->coalesce_mode |
7246 HOSTCC_MODE_ENABLE |
7257 /* work_done is guaranteed to be less than budget. */
7258 napi_complete(napi);
7259 tg3_reset_task_schedule(tp);
7263 static void tg3_process_error(struct tg3 *tp)
7266 bool real_error = false;
7268 if (tg3_flag(tp, ERROR_PROCESSED))
7271 /* Check Flow Attention register */
7272 val = tr32(HOSTCC_FLOW_ATTN);
7273 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7274 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7278 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7279 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7283 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7284 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7293 tg3_flag_set(tp, ERROR_PROCESSED);
7294 tg3_reset_task_schedule(tp);
7297 static int tg3_poll(struct napi_struct *napi, int budget)
7299 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7300 struct tg3 *tp = tnapi->tp;
7302 struct tg3_hw_status *sblk = tnapi->hw_status;
7305 if (sblk->status & SD_STATUS_ERROR)
7306 tg3_process_error(tp);
7310 work_done = tg3_poll_work(tnapi, work_done, budget);
7312 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7315 if (unlikely(work_done >= budget))
7318 if (tg3_flag(tp, TAGGED_STATUS)) {
7319 /* tp->last_tag is used in tg3_int_reenable() below
7320 * to tell the hw how much work has been processed,
7321 * so we must read it before checking for more work.
7323 tnapi->last_tag = sblk->status_tag;
7324 tnapi->last_irq_tag = tnapi->last_tag;
7327 sblk->status &= ~SD_STATUS_UPDATED;
7329 if (likely(!tg3_has_work(tnapi))) {
7330 napi_complete_done(napi, work_done);
7331 tg3_int_reenable(tnapi);
7339 /* work_done is guaranteed to be less than budget. */
7340 napi_complete(napi);
7341 tg3_reset_task_schedule(tp);
7345 static void tg3_napi_disable(struct tg3 *tp)
7349 for (i = tp->irq_cnt - 1; i >= 0; i--)
7350 napi_disable(&tp->napi[i].napi);
7353 static void tg3_napi_enable(struct tg3 *tp)
7357 for (i = 0; i < tp->irq_cnt; i++)
7358 napi_enable(&tp->napi[i].napi);
7361 static void tg3_napi_init(struct tg3 *tp)
7365 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7366 for (i = 1; i < tp->irq_cnt; i++)
7367 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7370 static void tg3_napi_fini(struct tg3 *tp)
7374 for (i = 0; i < tp->irq_cnt; i++)
7375 netif_napi_del(&tp->napi[i].napi);
7378 static inline void tg3_netif_stop(struct tg3 *tp)
7380 netif_trans_update(tp->dev); /* prevent tx timeout */
7381 tg3_napi_disable(tp);
7382 netif_carrier_off(tp->dev);
7383 netif_tx_disable(tp->dev);
7386 /* tp->lock must be held */
7387 static inline void tg3_netif_start(struct tg3 *tp)
7391 /* NOTE: unconditional netif_tx_wake_all_queues is only
7392 * appropriate so long as all callers are assured to
7393 * have free tx slots (such as after tg3_init_hw)
7395 netif_tx_wake_all_queues(tp->dev);
7398 netif_carrier_on(tp->dev);
7400 tg3_napi_enable(tp);
7401 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7402 tg3_enable_ints(tp);
7405 static void tg3_irq_quiesce(struct tg3 *tp)
7406 __releases(tp->lock)
7407 __acquires(tp->lock)
7411 BUG_ON(tp->irq_sync);
7416 spin_unlock_bh(&tp->lock);
7418 for (i = 0; i < tp->irq_cnt; i++)
7419 synchronize_irq(tp->napi[i].irq_vec);
7421 spin_lock_bh(&tp->lock);
7424 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7425 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7426 * with as well. Most of the time, this is not necessary except when
7427 * shutting down the device.
7429 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7431 spin_lock_bh(&tp->lock);
7433 tg3_irq_quiesce(tp);
7436 static inline void tg3_full_unlock(struct tg3 *tp)
7438 spin_unlock_bh(&tp->lock);
7441 /* One-shot MSI handler - Chip automatically disables interrupt
7442 * after sending MSI so driver doesn't have to do it.
7444 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7446 struct tg3_napi *tnapi = dev_id;
7447 struct tg3 *tp = tnapi->tp;
7449 prefetch(tnapi->hw_status);
7451 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7453 if (likely(!tg3_irq_sync(tp)))
7454 napi_schedule(&tnapi->napi);
7459 /* MSI ISR - No need to check for interrupt sharing and no need to
7460 * flush status block and interrupt mailbox. PCI ordering rules
7461 * guarantee that MSI will arrive after the status block.
7463 static irqreturn_t tg3_msi(int irq, void *dev_id)
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7468 prefetch(tnapi->hw_status);
7470 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7472 * Writing any value to intr-mbox-0 clears PCI INTA# and
7473 * chip-internal interrupt pending events.
7474 * Writing non-zero to intr-mbox-0 additional tells the
7475 * NIC to stop sending us irqs, engaging "in-intr-handler"
7478 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7479 if (likely(!tg3_irq_sync(tp)))
7480 napi_schedule(&tnapi->napi);
7482 return IRQ_RETVAL(1);
7485 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7487 struct tg3_napi *tnapi = dev_id;
7488 struct tg3 *tp = tnapi->tp;
7489 struct tg3_hw_status *sblk = tnapi->hw_status;
7490 unsigned int handled = 1;
7492 /* In INTx mode, it is possible for the interrupt to arrive at
7493 * the CPU before the status block posted prior to the interrupt.
7494 * Reading the PCI State register will confirm whether the
7495 * interrupt is ours and will flush the status block.
7497 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7498 if (tg3_flag(tp, CHIP_RESETTING) ||
7499 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506 * Writing any value to intr-mbox-0 clears PCI INTA# and
7507 * chip-internal interrupt pending events.
7508 * Writing non-zero to intr-mbox-0 additional tells the
7509 * NIC to stop sending us irqs, engaging "in-intr-handler"
7512 * Flush the mailbox to de-assert the IRQ immediately to prevent
7513 * spurious interrupts. The flush impacts performance but
7514 * excessive spurious interrupts can be worse in some cases.
7516 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7517 if (tg3_irq_sync(tp))
7519 sblk->status &= ~SD_STATUS_UPDATED;
7520 if (likely(tg3_has_work(tnapi))) {
7521 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7522 napi_schedule(&tnapi->napi);
7524 /* No work, shared interrupt perhaps? re-enable
7525 * interrupts, and flush that PCI write
7527 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7531 return IRQ_RETVAL(handled);
7534 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7536 struct tg3_napi *tnapi = dev_id;
7537 struct tg3 *tp = tnapi->tp;
7538 struct tg3_hw_status *sblk = tnapi->hw_status;
7539 unsigned int handled = 1;
7541 /* In INTx mode, it is possible for the interrupt to arrive at
7542 * the CPU before the status block posted prior to the interrupt.
7543 * Reading the PCI State register will confirm whether the
7544 * interrupt is ours and will flush the status block.
7546 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7547 if (tg3_flag(tp, CHIP_RESETTING) ||
7548 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7555 * writing any value to intr-mbox-0 clears PCI INTA# and
7556 * chip-internal interrupt pending events.
7557 * writing non-zero to intr-mbox-0 additional tells the
7558 * NIC to stop sending us irqs, engaging "in-intr-handler"
7561 * Flush the mailbox to de-assert the IRQ immediately to prevent
7562 * spurious interrupts. The flush impacts performance but
7563 * excessive spurious interrupts can be worse in some cases.
7565 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7568 * In a shared interrupt configuration, sometimes other devices'
7569 * interrupts will scream. We record the current status tag here
7570 * so that the above check can report that the screaming interrupts
7571 * are unhandled. Eventually they will be silenced.
7573 tnapi->last_irq_tag = sblk->status_tag;
7575 if (tg3_irq_sync(tp))
7578 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7580 napi_schedule(&tnapi->napi);
7583 return IRQ_RETVAL(handled);
7586 /* ISR for interrupt test */
7587 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7589 struct tg3_napi *tnapi = dev_id;
7590 struct tg3 *tp = tnapi->tp;
7591 struct tg3_hw_status *sblk = tnapi->hw_status;
7593 if ((sblk->status & SD_STATUS_UPDATED) ||
7594 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7595 tg3_disable_ints(tp);
7596 return IRQ_RETVAL(1);
7598 return IRQ_RETVAL(0);
7601 #ifdef CONFIG_NET_POLL_CONTROLLER
7602 static void tg3_poll_controller(struct net_device *dev)
7605 struct tg3 *tp = netdev_priv(dev);
7607 if (tg3_irq_sync(tp))
7610 for (i = 0; i < tp->irq_cnt; i++)
7611 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7615 static void tg3_tx_timeout(struct net_device *dev)
7617 struct tg3 *tp = netdev_priv(dev);
7619 if (netif_msg_tx_err(tp)) {
7620 netdev_err(dev, "transmit timed out, resetting\n");
7624 tg3_reset_task_schedule(tp);
7627 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7628 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7630 u32 base = (u32) mapping & 0xffffffff;
7632 return base + len + 8 < base;
7635 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7636 * of any 4GB boundaries: 4G, 8G, etc
7638 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7641 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7642 u32 base = (u32) mapping & 0xffffffff;
7644 return ((base + len + (mss & 0x3fff)) < base);
7649 /* Test for DMA addresses > 40-bit */
7650 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7653 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7654 if (tg3_flag(tp, 40BIT_DMA_BUG))
7655 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7662 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7663 dma_addr_t mapping, u32 len, u32 flags,
7666 txbd->addr_hi = ((u64) mapping >> 32);
7667 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7668 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7669 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7672 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7673 dma_addr_t map, u32 len, u32 flags,
7676 struct tg3 *tp = tnapi->tp;
7679 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7682 if (tg3_4g_overflow_test(map, len))
7685 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7688 if (tg3_40bit_overflow_test(tp, map, len))
7691 if (tp->dma_limit) {
7692 u32 prvidx = *entry;
7693 u32 tmp_flag = flags & ~TXD_FLAG_END;
7694 while (len > tp->dma_limit && *budget) {
7695 u32 frag_len = tp->dma_limit;
7696 len -= tp->dma_limit;
7698 /* Avoid the 8byte DMA problem */
7700 len += tp->dma_limit / 2;
7701 frag_len = tp->dma_limit / 2;
7704 tnapi->tx_buffers[*entry].fragmented = true;
7706 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7707 frag_len, tmp_flag, mss, vlan);
7710 *entry = NEXT_TX(*entry);
7717 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7718 len, flags, mss, vlan);
7720 *entry = NEXT_TX(*entry);
7723 tnapi->tx_buffers[prvidx].fragmented = false;
7727 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7728 len, flags, mss, vlan);
7729 *entry = NEXT_TX(*entry);
7735 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7738 struct sk_buff *skb;
7739 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7744 pci_unmap_single(tnapi->tp->pdev,
7745 dma_unmap_addr(txb, mapping),
7749 while (txb->fragmented) {
7750 txb->fragmented = false;
7751 entry = NEXT_TX(entry);
7752 txb = &tnapi->tx_buffers[entry];
7755 for (i = 0; i <= last; i++) {
7756 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7758 entry = NEXT_TX(entry);
7759 txb = &tnapi->tx_buffers[entry];
7761 pci_unmap_page(tnapi->tp->pdev,
7762 dma_unmap_addr(txb, mapping),
7763 skb_frag_size(frag), PCI_DMA_TODEVICE);
7765 while (txb->fragmented) {
7766 txb->fragmented = false;
7767 entry = NEXT_TX(entry);
7768 txb = &tnapi->tx_buffers[entry];
7773 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7774 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7775 struct sk_buff **pskb,
7776 u32 *entry, u32 *budget,
7777 u32 base_flags, u32 mss, u32 vlan)
7779 struct tg3 *tp = tnapi->tp;
7780 struct sk_buff *new_skb, *skb = *pskb;
7781 dma_addr_t new_addr = 0;
7784 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7785 new_skb = skb_copy(skb, GFP_ATOMIC);
7787 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7789 new_skb = skb_copy_expand(skb,
7790 skb_headroom(skb) + more_headroom,
7791 skb_tailroom(skb), GFP_ATOMIC);
7797 /* New SKB is guaranteed to be linear. */
7798 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7800 /* Make sure the mapping succeeded */
7801 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7802 dev_kfree_skb_any(new_skb);
7805 u32 save_entry = *entry;
7807 base_flags |= TXD_FLAG_END;
7809 tnapi->tx_buffers[*entry].skb = new_skb;
7810 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7813 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7814 new_skb->len, base_flags,
7816 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7817 dev_kfree_skb_any(new_skb);
7823 dev_consume_skb_any(skb);
7828 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7830 /* Check if we will never have enough descriptors,
7831 * as gso_segs can be more than current ring size
7833 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7836 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7838 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7839 * indicated in tg3_tx_frag_set()
7841 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7842 struct netdev_queue *txq, struct sk_buff *skb)
7844 struct sk_buff *segs, *nskb;
7845 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7847 /* Estimate the number of fragments in the worst case */
7848 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7849 netif_tx_stop_queue(txq);
7851 /* netif_tx_stop_queue() must be done before checking
7852 * checking tx index in tg3_tx_avail() below, because in
7853 * tg3_tx(), we update tx index before checking for
7854 * netif_tx_queue_stopped().
7857 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7858 return NETDEV_TX_BUSY;
7860 netif_tx_wake_queue(txq);
7863 segs = skb_gso_segment(skb, tp->dev->features &
7864 ~(NETIF_F_TSO | NETIF_F_TSO6));
7865 if (IS_ERR(segs) || !segs)
7866 goto tg3_tso_bug_end;
7872 tg3_start_xmit(nskb, tp->dev);
7876 dev_consume_skb_any(skb);
7878 return NETDEV_TX_OK;
7881 /* hard_start_xmit for all devices */
7882 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7884 struct tg3 *tp = netdev_priv(dev);
7885 u32 len, entry, base_flags, mss, vlan = 0;
7887 int i = -1, would_hit_hwbug;
7889 struct tg3_napi *tnapi;
7890 struct netdev_queue *txq;
7892 struct iphdr *iph = NULL;
7893 struct tcphdr *tcph = NULL;
7894 __sum16 tcp_csum = 0, ip_csum = 0;
7895 __be16 ip_tot_len = 0;
7897 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7898 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7899 if (tg3_flag(tp, ENABLE_TSS))
7902 budget = tg3_tx_avail(tnapi);
7904 /* We are running in BH disabled context with netif_tx_lock
7905 * and TX reclaim runs via tp->napi.poll inside of a software
7906 * interrupt. Furthermore, IRQ processing runs lockless so we have
7907 * no IRQ context deadlocks to worry about either. Rejoice!
7909 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7910 if (!netif_tx_queue_stopped(txq)) {
7911 netif_tx_stop_queue(txq);
7913 /* This is a hard error, log it. */
7915 "BUG! Tx Ring full when queue awake!\n");
7917 return NETDEV_TX_BUSY;
7920 entry = tnapi->tx_prod;
7923 mss = skb_shinfo(skb)->gso_size;
7925 u32 tcp_opt_len, hdr_len;
7927 if (skb_cow_head(skb, 0))
7931 tcp_opt_len = tcp_optlen(skb);
7933 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7935 /* HW/FW can not correctly segment packets that have been
7936 * vlan encapsulated.
7938 if (skb->protocol == htons(ETH_P_8021Q) ||
7939 skb->protocol == htons(ETH_P_8021AD)) {
7940 if (tg3_tso_bug_gso_check(tnapi, skb))
7941 return tg3_tso_bug(tp, tnapi, txq, skb);
7945 if (!skb_is_gso_v6(skb)) {
7946 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7947 tg3_flag(tp, TSO_BUG)) {
7948 if (tg3_tso_bug_gso_check(tnapi, skb))
7949 return tg3_tso_bug(tp, tnapi, txq, skb);
7952 ip_csum = iph->check;
7953 ip_tot_len = iph->tot_len;
7955 iph->tot_len = htons(mss + hdr_len);
7958 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7959 TXD_FLAG_CPU_POST_DMA);
7961 tcph = tcp_hdr(skb);
7962 tcp_csum = tcph->check;
7964 if (tg3_flag(tp, HW_TSO_1) ||
7965 tg3_flag(tp, HW_TSO_2) ||
7966 tg3_flag(tp, HW_TSO_3)) {
7968 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7970 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7974 if (tg3_flag(tp, HW_TSO_3)) {
7975 mss |= (hdr_len & 0xc) << 12;
7977 base_flags |= 0x00000010;
7978 base_flags |= (hdr_len & 0x3e0) << 5;
7979 } else if (tg3_flag(tp, HW_TSO_2))
7980 mss |= hdr_len << 9;
7981 else if (tg3_flag(tp, HW_TSO_1) ||
7982 tg3_asic_rev(tp) == ASIC_REV_5705) {
7983 if (tcp_opt_len || iph->ihl > 5) {
7986 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7987 mss |= (tsflags << 11);
7990 if (tcp_opt_len || iph->ihl > 5) {
7993 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7994 base_flags |= tsflags << 12;
7997 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7998 /* HW/FW can not correctly checksum packets that have been
7999 * vlan encapsulated.
8001 if (skb->protocol == htons(ETH_P_8021Q) ||
8002 skb->protocol == htons(ETH_P_8021AD)) {
8003 if (skb_checksum_help(skb))
8006 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8010 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8011 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8012 base_flags |= TXD_FLAG_JMB_PKT;
8014 if (skb_vlan_tag_present(skb)) {
8015 base_flags |= TXD_FLAG_VLAN;
8016 vlan = skb_vlan_tag_get(skb);
8019 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8020 tg3_flag(tp, TX_TSTAMP_EN)) {
8021 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8022 base_flags |= TXD_FLAG_HWTSTAMP;
8025 len = skb_headlen(skb);
8027 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8028 if (pci_dma_mapping_error(tp->pdev, mapping))
8032 tnapi->tx_buffers[entry].skb = skb;
8033 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8035 would_hit_hwbug = 0;
8037 if (tg3_flag(tp, 5701_DMA_BUG))
8038 would_hit_hwbug = 1;
8040 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8041 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8043 would_hit_hwbug = 1;
8044 } else if (skb_shinfo(skb)->nr_frags > 0) {
8047 if (!tg3_flag(tp, HW_TSO_1) &&
8048 !tg3_flag(tp, HW_TSO_2) &&
8049 !tg3_flag(tp, HW_TSO_3))
8052 /* Now loop through additional data
8053 * fragments, and queue them.
8055 last = skb_shinfo(skb)->nr_frags - 1;
8056 for (i = 0; i <= last; i++) {
8057 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8059 len = skb_frag_size(frag);
8060 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8061 len, DMA_TO_DEVICE);
8063 tnapi->tx_buffers[entry].skb = NULL;
8064 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8066 if (dma_mapping_error(&tp->pdev->dev, mapping))
8070 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8072 ((i == last) ? TXD_FLAG_END : 0),
8074 would_hit_hwbug = 1;
8080 if (would_hit_hwbug) {
8081 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8083 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8084 /* If it's a TSO packet, do GSO instead of
8085 * allocating and copying to a large linear SKB
8088 iph->check = ip_csum;
8089 iph->tot_len = ip_tot_len;
8091 tcph->check = tcp_csum;
8092 return tg3_tso_bug(tp, tnapi, txq, skb);
8095 /* If the workaround fails due to memory/mapping
8096 * failure, silently drop this packet.
8098 entry = tnapi->tx_prod;
8099 budget = tg3_tx_avail(tnapi);
8100 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8101 base_flags, mss, vlan))
8105 skb_tx_timestamp(skb);
8106 netdev_tx_sent_queue(txq, skb->len);
8108 /* Sync BD data before updating mailbox */
8111 tnapi->tx_prod = entry;
8112 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8113 netif_tx_stop_queue(txq);
8115 /* netif_tx_stop_queue() must be done before checking
8116 * checking tx index in tg3_tx_avail() below, because in
8117 * tg3_tx(), we update tx index before checking for
8118 * netif_tx_queue_stopped().
8121 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8122 netif_tx_wake_queue(txq);
8125 if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8126 /* Packets are ready, update Tx producer idx on card. */
8127 tw32_tx_mbox(tnapi->prodmbox, entry);
8131 return NETDEV_TX_OK;
8134 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8135 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8137 dev_kfree_skb_any(skb);
8140 return NETDEV_TX_OK;
8143 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8146 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8147 MAC_MODE_PORT_MODE_MASK);
8149 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8151 if (!tg3_flag(tp, 5705_PLUS))
8152 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8154 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8155 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8157 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8159 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8161 if (tg3_flag(tp, 5705_PLUS) ||
8162 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8163 tg3_asic_rev(tp) == ASIC_REV_5700)
8164 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8167 tw32(MAC_MODE, tp->mac_mode);
8171 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8173 u32 val, bmcr, mac_mode, ptest = 0;
8175 tg3_phy_toggle_apd(tp, false);
8176 tg3_phy_toggle_automdix(tp, false);
8178 if (extlpbk && tg3_phy_set_extloopbk(tp))
8181 bmcr = BMCR_FULLDPLX;
8186 bmcr |= BMCR_SPEED100;
8190 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8192 bmcr |= BMCR_SPEED100;
8195 bmcr |= BMCR_SPEED1000;
8200 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8201 tg3_readphy(tp, MII_CTRL1000, &val);
8202 val |= CTL1000_AS_MASTER |
8203 CTL1000_ENABLE_MASTER;
8204 tg3_writephy(tp, MII_CTRL1000, val);
8206 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8207 MII_TG3_FET_PTEST_TRIM_2;
8208 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8211 bmcr |= BMCR_LOOPBACK;
8213 tg3_writephy(tp, MII_BMCR, bmcr);
8215 /* The write needs to be flushed for the FETs */
8216 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8217 tg3_readphy(tp, MII_BMCR, &bmcr);
8221 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8222 tg3_asic_rev(tp) == ASIC_REV_5785) {
8223 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8224 MII_TG3_FET_PTEST_FRC_TX_LINK |
8225 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8227 /* The write needs to be flushed for the AC131 */
8228 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8231 /* Reset to prevent losing 1st rx packet intermittently */
8232 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8233 tg3_flag(tp, 5780_CLASS)) {
8234 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8236 tw32_f(MAC_RX_MODE, tp->rx_mode);
8239 mac_mode = tp->mac_mode &
8240 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8241 if (speed == SPEED_1000)
8242 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8244 mac_mode |= MAC_MODE_PORT_MODE_MII;
8246 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8247 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8249 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8250 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8251 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8252 mac_mode |= MAC_MODE_LINK_POLARITY;
8254 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8255 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8258 tw32(MAC_MODE, mac_mode);
8264 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8266 struct tg3 *tp = netdev_priv(dev);
8268 if (features & NETIF_F_LOOPBACK) {
8269 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8272 spin_lock_bh(&tp->lock);
8273 tg3_mac_loopback(tp, true);
8274 netif_carrier_on(tp->dev);
8275 spin_unlock_bh(&tp->lock);
8276 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8278 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8281 spin_lock_bh(&tp->lock);
8282 tg3_mac_loopback(tp, false);
8283 /* Force link status check */
8284 tg3_setup_phy(tp, true);
8285 spin_unlock_bh(&tp->lock);
8286 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8290 static netdev_features_t tg3_fix_features(struct net_device *dev,
8291 netdev_features_t features)
8293 struct tg3 *tp = netdev_priv(dev);
8295 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8296 features &= ~NETIF_F_ALL_TSO;
8301 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8303 netdev_features_t changed = dev->features ^ features;
8305 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8306 tg3_set_loopback(dev, features);
8311 static void tg3_rx_prodring_free(struct tg3 *tp,
8312 struct tg3_rx_prodring_set *tpr)
8316 if (tpr != &tp->napi[0].prodring) {
8317 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8318 i = (i + 1) & tp->rx_std_ring_mask)
8319 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8322 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8323 for (i = tpr->rx_jmb_cons_idx;
8324 i != tpr->rx_jmb_prod_idx;
8325 i = (i + 1) & tp->rx_jmb_ring_mask) {
8326 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8334 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8335 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8338 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8339 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8340 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8345 /* Initialize rx rings for packet processing.
8347 * The chip has been shut down and the driver detached from
8348 * the networking, so no interrupts or new tx packets will
8349 * end up in the driver. tp->{tx,}lock are held and thus
8352 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8353 struct tg3_rx_prodring_set *tpr)
8355 u32 i, rx_pkt_dma_sz;
8357 tpr->rx_std_cons_idx = 0;
8358 tpr->rx_std_prod_idx = 0;
8359 tpr->rx_jmb_cons_idx = 0;
8360 tpr->rx_jmb_prod_idx = 0;
8362 if (tpr != &tp->napi[0].prodring) {
8363 memset(&tpr->rx_std_buffers[0], 0,
8364 TG3_RX_STD_BUFF_RING_SIZE(tp));
8365 if (tpr->rx_jmb_buffers)
8366 memset(&tpr->rx_jmb_buffers[0], 0,
8367 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8371 /* Zero out all descriptors. */
8372 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8374 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8375 if (tg3_flag(tp, 5780_CLASS) &&
8376 tp->dev->mtu > ETH_DATA_LEN)
8377 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8378 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8380 /* Initialize invariants of the rings, we only set this
8381 * stuff once. This works because the card does not
8382 * write into the rx buffer posting rings.
8384 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8385 struct tg3_rx_buffer_desc *rxd;
8387 rxd = &tpr->rx_std[i];
8388 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8389 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8390 rxd->opaque = (RXD_OPAQUE_RING_STD |
8391 (i << RXD_OPAQUE_INDEX_SHIFT));
8394 /* Now allocate fresh SKBs for each rx ring. */
8395 for (i = 0; i < tp->rx_pending; i++) {
8396 unsigned int frag_size;
8398 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8400 netdev_warn(tp->dev,
8401 "Using a smaller RX standard ring. Only "
8402 "%d out of %d buffers were allocated "
8403 "successfully\n", i, tp->rx_pending);
8411 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8414 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8416 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8419 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8420 struct tg3_rx_buffer_desc *rxd;
8422 rxd = &tpr->rx_jmb[i].std;
8423 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8424 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8426 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8427 (i << RXD_OPAQUE_INDEX_SHIFT));
8430 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8431 unsigned int frag_size;
8433 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8435 netdev_warn(tp->dev,
8436 "Using a smaller RX jumbo ring. Only %d "
8437 "out of %d buffers were allocated "
8438 "successfully\n", i, tp->rx_jumbo_pending);
8441 tp->rx_jumbo_pending = i;
8450 tg3_rx_prodring_free(tp, tpr);
8454 static void tg3_rx_prodring_fini(struct tg3 *tp,
8455 struct tg3_rx_prodring_set *tpr)
8457 kfree(tpr->rx_std_buffers);
8458 tpr->rx_std_buffers = NULL;
8459 kfree(tpr->rx_jmb_buffers);
8460 tpr->rx_jmb_buffers = NULL;
8462 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8463 tpr->rx_std, tpr->rx_std_mapping);
8467 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8468 tpr->rx_jmb, tpr->rx_jmb_mapping);
8473 static int tg3_rx_prodring_init(struct tg3 *tp,
8474 struct tg3_rx_prodring_set *tpr)
8476 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8478 if (!tpr->rx_std_buffers)
8481 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8482 TG3_RX_STD_RING_BYTES(tp),
8483 &tpr->rx_std_mapping,
8488 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8489 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8491 if (!tpr->rx_jmb_buffers)
8494 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8495 TG3_RX_JMB_RING_BYTES(tp),
8496 &tpr->rx_jmb_mapping,
8505 tg3_rx_prodring_fini(tp, tpr);
8509 /* Free up pending packets in all rx/tx rings.
8511 * The chip has been shut down and the driver detached from
8512 * the networking, so no interrupts or new tx packets will
8513 * end up in the driver. tp->{tx,}lock is not held and we are not
8514 * in an interrupt context and thus may sleep.
8516 static void tg3_free_rings(struct tg3 *tp)
8520 for (j = 0; j < tp->irq_cnt; j++) {
8521 struct tg3_napi *tnapi = &tp->napi[j];
8523 tg3_rx_prodring_free(tp, &tnapi->prodring);
8525 if (!tnapi->tx_buffers)
8528 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8529 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8534 tg3_tx_skb_unmap(tnapi, i,
8535 skb_shinfo(skb)->nr_frags - 1);
8537 dev_consume_skb_any(skb);
8539 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8543 /* Initialize tx/rx rings for packet processing.
8545 * The chip has been shut down and the driver detached from
8546 * the networking, so no interrupts or new tx packets will
8547 * end up in the driver. tp->{tx,}lock are held and thus
8550 static int tg3_init_rings(struct tg3 *tp)
8554 /* Free up all the SKBs. */
8557 for (i = 0; i < tp->irq_cnt; i++) {
8558 struct tg3_napi *tnapi = &tp->napi[i];
8560 tnapi->last_tag = 0;
8561 tnapi->last_irq_tag = 0;
8562 tnapi->hw_status->status = 0;
8563 tnapi->hw_status->status_tag = 0;
8564 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8569 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8571 tnapi->rx_rcb_ptr = 0;
8573 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8575 if (tnapi->prodring.rx_std &&
8576 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8585 static void tg3_mem_tx_release(struct tg3 *tp)
8589 for (i = 0; i < tp->irq_max; i++) {
8590 struct tg3_napi *tnapi = &tp->napi[i];
8592 if (tnapi->tx_ring) {
8593 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8594 tnapi->tx_ring, tnapi->tx_desc_mapping);
8595 tnapi->tx_ring = NULL;
8598 kfree(tnapi->tx_buffers);
8599 tnapi->tx_buffers = NULL;
8603 static int tg3_mem_tx_acquire(struct tg3 *tp)
8606 struct tg3_napi *tnapi = &tp->napi[0];
8608 /* If multivector TSS is enabled, vector 0 does not handle
8609 * tx interrupts. Don't allocate any resources for it.
8611 if (tg3_flag(tp, ENABLE_TSS))
8614 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8615 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8616 TG3_TX_RING_SIZE, GFP_KERNEL);
8617 if (!tnapi->tx_buffers)
8620 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8622 &tnapi->tx_desc_mapping,
8624 if (!tnapi->tx_ring)
8631 tg3_mem_tx_release(tp);
8635 static void tg3_mem_rx_release(struct tg3 *tp)
8639 for (i = 0; i < tp->irq_max; i++) {
8640 struct tg3_napi *tnapi = &tp->napi[i];
8642 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8647 dma_free_coherent(&tp->pdev->dev,
8648 TG3_RX_RCB_RING_BYTES(tp),
8650 tnapi->rx_rcb_mapping);
8651 tnapi->rx_rcb = NULL;
8655 static int tg3_mem_rx_acquire(struct tg3 *tp)
8657 unsigned int i, limit;
8659 limit = tp->rxq_cnt;
8661 /* If RSS is enabled, we need a (dummy) producer ring
8662 * set on vector zero. This is the true hw prodring.
8664 if (tg3_flag(tp, ENABLE_RSS))
8667 for (i = 0; i < limit; i++) {
8668 struct tg3_napi *tnapi = &tp->napi[i];
8670 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8673 /* If multivector RSS is enabled, vector 0
8674 * does not handle rx or tx interrupts.
8675 * Don't allocate any resources for it.
8677 if (!i && tg3_flag(tp, ENABLE_RSS))
8680 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8681 TG3_RX_RCB_RING_BYTES(tp),
8682 &tnapi->rx_rcb_mapping,
8691 tg3_mem_rx_release(tp);
8696 * Must not be invoked with interrupt sources disabled and
8697 * the hardware shutdown down.
8699 static void tg3_free_consistent(struct tg3 *tp)
8703 for (i = 0; i < tp->irq_cnt; i++) {
8704 struct tg3_napi *tnapi = &tp->napi[i];
8706 if (tnapi->hw_status) {
8707 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8709 tnapi->status_mapping);
8710 tnapi->hw_status = NULL;
8714 tg3_mem_rx_release(tp);
8715 tg3_mem_tx_release(tp);
8717 /* tp->hw_stats can be referenced safely:
8718 * 1. under rtnl_lock
8719 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8722 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8723 tp->hw_stats, tp->stats_mapping);
8724 tp->hw_stats = NULL;
8729 * Must not be invoked with interrupt sources disabled and
8730 * the hardware shutdown down. Can sleep.
8732 static int tg3_alloc_consistent(struct tg3 *tp)
8736 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8737 sizeof(struct tg3_hw_stats),
8738 &tp->stats_mapping, GFP_KERNEL);
8742 for (i = 0; i < tp->irq_cnt; i++) {
8743 struct tg3_napi *tnapi = &tp->napi[i];
8744 struct tg3_hw_status *sblk;
8746 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8748 &tnapi->status_mapping,
8750 if (!tnapi->hw_status)
8753 sblk = tnapi->hw_status;
8755 if (tg3_flag(tp, ENABLE_RSS)) {
8756 u16 *prodptr = NULL;
8759 * When RSS is enabled, the status block format changes
8760 * slightly. The "rx_jumbo_consumer", "reserved",
8761 * and "rx_mini_consumer" members get mapped to the
8762 * other three rx return ring producer indexes.
8766 prodptr = &sblk->idx[0].rx_producer;
8769 prodptr = &sblk->rx_jumbo_consumer;
8772 prodptr = &sblk->reserved;
8775 prodptr = &sblk->rx_mini_consumer;
8778 tnapi->rx_rcb_prod_idx = prodptr;
8780 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8784 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8790 tg3_free_consistent(tp);
8794 #define MAX_WAIT_CNT 1000
8796 /* To stop a block, clear the enable bit and poll till it
8797 * clears. tp->lock is held.
8799 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8804 if (tg3_flag(tp, 5705_PLUS)) {
8811 /* We can't enable/disable these bits of the
8812 * 5705/5750, just say success.
8825 for (i = 0; i < MAX_WAIT_CNT; i++) {
8826 if (pci_channel_offline(tp->pdev)) {
8827 dev_err(&tp->pdev->dev,
8828 "tg3_stop_block device offline, "
8829 "ofs=%lx enable_bit=%x\n",
8836 if ((val & enable_bit) == 0)
8840 if (i == MAX_WAIT_CNT && !silent) {
8841 dev_err(&tp->pdev->dev,
8842 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8850 /* tp->lock is held. */
8851 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8855 tg3_disable_ints(tp);
8857 if (pci_channel_offline(tp->pdev)) {
8858 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8859 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8864 tp->rx_mode &= ~RX_MODE_ENABLE;
8865 tw32_f(MAC_RX_MODE, tp->rx_mode);
8868 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8869 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8870 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8871 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8872 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8873 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8875 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8876 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8877 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8881 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8883 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8884 tw32_f(MAC_MODE, tp->mac_mode);
8887 tp->tx_mode &= ~TX_MODE_ENABLE;
8888 tw32_f(MAC_TX_MODE, tp->tx_mode);
8890 for (i = 0; i < MAX_WAIT_CNT; i++) {
8892 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8895 if (i >= MAX_WAIT_CNT) {
8896 dev_err(&tp->pdev->dev,
8897 "%s timed out, TX_MODE_ENABLE will not clear "
8898 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8902 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8903 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8904 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8906 tw32(FTQ_RESET, 0xffffffff);
8907 tw32(FTQ_RESET, 0x00000000);
8909 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8910 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8913 for (i = 0; i < tp->irq_cnt; i++) {
8914 struct tg3_napi *tnapi = &tp->napi[i];
8915 if (tnapi->hw_status)
8916 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8922 /* Save PCI command register before chip reset */
8923 static void tg3_save_pci_state(struct tg3 *tp)
8925 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8928 /* Restore PCI state after chip reset */
8929 static void tg3_restore_pci_state(struct tg3 *tp)
8933 /* Re-enable indirect register accesses. */
8934 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8935 tp->misc_host_ctrl);
8937 /* Set MAX PCI retry to zero. */
8938 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8939 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8940 tg3_flag(tp, PCIX_MODE))
8941 val |= PCISTATE_RETRY_SAME_DMA;
8942 /* Allow reads and writes to the APE register and memory space. */
8943 if (tg3_flag(tp, ENABLE_APE))
8944 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8945 PCISTATE_ALLOW_APE_SHMEM_WR |
8946 PCISTATE_ALLOW_APE_PSPACE_WR;
8947 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8949 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8951 if (!tg3_flag(tp, PCI_EXPRESS)) {
8952 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8953 tp->pci_cacheline_sz);
8954 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8958 /* Make sure PCI-X relaxed ordering bit is clear. */
8959 if (tg3_flag(tp, PCIX_MODE)) {
8962 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8964 pcix_cmd &= ~PCI_X_CMD_ERO;
8965 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8969 if (tg3_flag(tp, 5780_CLASS)) {
8971 /* Chip reset on 5780 will reset MSI enable bit,
8972 * so need to restore it.
8974 if (tg3_flag(tp, USING_MSI)) {
8977 pci_read_config_word(tp->pdev,
8978 tp->msi_cap + PCI_MSI_FLAGS,
8980 pci_write_config_word(tp->pdev,
8981 tp->msi_cap + PCI_MSI_FLAGS,
8982 ctrl | PCI_MSI_FLAGS_ENABLE);
8983 val = tr32(MSGINT_MODE);
8984 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8989 static void tg3_override_clk(struct tg3 *tp)
8993 switch (tg3_asic_rev(tp)) {
8995 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
8996 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
8997 TG3_CPMU_MAC_ORIDE_ENABLE);
9002 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9010 static void tg3_restore_clk(struct tg3 *tp)
9014 switch (tg3_asic_rev(tp)) {
9016 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9017 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9018 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9023 val = tr32(TG3_CPMU_CLCK_ORIDE);
9024 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9032 /* tp->lock is held. */
9033 static int tg3_chip_reset(struct tg3 *tp)
9034 __releases(tp->lock)
9035 __acquires(tp->lock)
9038 void (*write_op)(struct tg3 *, u32, u32);
9041 if (!pci_device_is_present(tp->pdev))
9046 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9048 /* No matching tg3_nvram_unlock() after this because
9049 * chip reset below will undo the nvram lock.
9051 tp->nvram_lock_cnt = 0;
9053 /* GRC_MISC_CFG core clock reset will clear the memory
9054 * enable bit in PCI register 4 and the MSI enable bit
9055 * on some chips, so we save relevant registers here.
9057 tg3_save_pci_state(tp);
9059 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9060 tg3_flag(tp, 5755_PLUS))
9061 tw32(GRC_FASTBOOT_PC, 0);
9064 * We must avoid the readl() that normally takes place.
9065 * It locks machines, causes machine checks, and other
9066 * fun things. So, temporarily disable the 5701
9067 * hardware workaround, while we do the reset.
9069 write_op = tp->write32;
9070 if (write_op == tg3_write_flush_reg32)
9071 tp->write32 = tg3_write32;
9073 /* Prevent the irq handler from reading or writing PCI registers
9074 * during chip reset when the memory enable bit in the PCI command
9075 * register may be cleared. The chip does not generate interrupt
9076 * at this time, but the irq handler may still be called due to irq
9077 * sharing or irqpoll.
9079 tg3_flag_set(tp, CHIP_RESETTING);
9080 for (i = 0; i < tp->irq_cnt; i++) {
9081 struct tg3_napi *tnapi = &tp->napi[i];
9082 if (tnapi->hw_status) {
9083 tnapi->hw_status->status = 0;
9084 tnapi->hw_status->status_tag = 0;
9086 tnapi->last_tag = 0;
9087 tnapi->last_irq_tag = 0;
9091 tg3_full_unlock(tp);
9093 for (i = 0; i < tp->irq_cnt; i++)
9094 synchronize_irq(tp->napi[i].irq_vec);
9096 tg3_full_lock(tp, 0);
9098 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9099 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9100 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9104 val = GRC_MISC_CFG_CORECLK_RESET;
9106 if (tg3_flag(tp, PCI_EXPRESS)) {
9107 /* Force PCIe 1.0a mode */
9108 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9109 !tg3_flag(tp, 57765_PLUS) &&
9110 tr32(TG3_PCIE_PHY_TSTCTL) ==
9111 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9112 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9114 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9115 tw32(GRC_MISC_CFG, (1 << 29));
9120 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9121 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9122 tw32(GRC_VCPU_EXT_CTRL,
9123 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9126 /* Set the clock to the highest frequency to avoid timeouts. With link
9127 * aware mode, the clock speed could be slow and bootcode does not
9128 * complete within the expected time. Override the clock to allow the
9129 * bootcode to finish sooner and then restore it.
9131 tg3_override_clk(tp);
9133 /* Manage gphy power for all CPMU absent PCIe devices. */
9134 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9135 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9137 tw32(GRC_MISC_CFG, val);
9139 /* restore 5701 hardware bug workaround write method */
9140 tp->write32 = write_op;
9142 /* Unfortunately, we have to delay before the PCI read back.
9143 * Some 575X chips even will not respond to a PCI cfg access
9144 * when the reset command is given to the chip.
9146 * How do these hardware designers expect things to work
9147 * properly if the PCI write is posted for a long period
9148 * of time? It is always necessary to have some method by
9149 * which a register read back can occur to push the write
9150 * out which does the reset.
9152 * For most tg3 variants the trick below was working.
9157 /* Flush PCI posted writes. The normal MMIO registers
9158 * are inaccessible at this time so this is the only
9159 * way to make this reliably (actually, this is no longer
9160 * the case, see above). I tried to use indirect
9161 * register read/write but this upset some 5701 variants.
9163 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9167 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9170 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9174 /* Wait for link training to complete. */
9175 for (j = 0; j < 5000; j++)
9178 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9179 pci_write_config_dword(tp->pdev, 0xc4,
9180 cfg_val | (1 << 15));
9183 /* Clear the "no snoop" and "relaxed ordering" bits. */
9184 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9186 * Older PCIe devices only support the 128 byte
9187 * MPS setting. Enforce the restriction.
9189 if (!tg3_flag(tp, CPMU_PRESENT))
9190 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9191 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9193 /* Clear error status */
9194 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9195 PCI_EXP_DEVSTA_CED |
9196 PCI_EXP_DEVSTA_NFED |
9197 PCI_EXP_DEVSTA_FED |
9198 PCI_EXP_DEVSTA_URD);
9201 tg3_restore_pci_state(tp);
9203 tg3_flag_clear(tp, CHIP_RESETTING);
9204 tg3_flag_clear(tp, ERROR_PROCESSED);
9207 if (tg3_flag(tp, 5780_CLASS))
9208 val = tr32(MEMARB_MODE);
9209 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9211 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9213 tw32(0x5000, 0x400);
9216 if (tg3_flag(tp, IS_SSB_CORE)) {
9218 * BCM4785: In order to avoid repercussions from using
9219 * potentially defective internal ROM, stop the Rx RISC CPU,
9220 * which is not required.
9223 tg3_halt_cpu(tp, RX_CPU_BASE);
9226 err = tg3_poll_fw(tp);
9230 tw32(GRC_MODE, tp->grc_mode);
9232 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9235 tw32(0xc4, val | (1 << 15));
9238 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9239 tg3_asic_rev(tp) == ASIC_REV_5705) {
9240 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9241 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9242 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9243 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9246 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9247 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9249 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9250 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9255 tw32_f(MAC_MODE, val);
9258 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9262 if (tg3_flag(tp, PCI_EXPRESS) &&
9263 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9264 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9265 !tg3_flag(tp, 57765_PLUS)) {
9268 tw32(0x7c00, val | (1 << 25));
9271 tg3_restore_clk(tp);
9273 /* Increase the core clock speed to fix tx timeout issue for 5762
9274 * with 100Mbps link speed.
9276 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9277 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9278 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9279 TG3_CPMU_MAC_ORIDE_ENABLE);
9282 /* Reprobe ASF enable state. */
9283 tg3_flag_clear(tp, ENABLE_ASF);
9284 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9285 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9287 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9288 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9289 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9292 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9293 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9294 tg3_flag_set(tp, ENABLE_ASF);
9295 tp->last_event_jiffies = jiffies;
9296 if (tg3_flag(tp, 5750_PLUS))
9297 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9299 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9300 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9301 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9302 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9303 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9310 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9311 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9312 static void __tg3_set_rx_mode(struct net_device *);
9314 /* tp->lock is held. */
9315 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9321 tg3_write_sig_pre_reset(tp, kind);
9323 tg3_abort_hw(tp, silent);
9324 err = tg3_chip_reset(tp);
9326 __tg3_set_mac_addr(tp, false);
9328 tg3_write_sig_legacy(tp, kind);
9329 tg3_write_sig_post_reset(tp, kind);
9332 /* Save the stats across chip resets... */
9333 tg3_get_nstats(tp, &tp->net_stats_prev);
9334 tg3_get_estats(tp, &tp->estats_prev);
9336 /* And make sure the next sample is new data */
9337 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9343 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9345 struct tg3 *tp = netdev_priv(dev);
9346 struct sockaddr *addr = p;
9348 bool skip_mac_1 = false;
9350 if (!is_valid_ether_addr(addr->sa_data))
9351 return -EADDRNOTAVAIL;
9353 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9355 if (!netif_running(dev))
9358 if (tg3_flag(tp, ENABLE_ASF)) {
9359 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9361 addr0_high = tr32(MAC_ADDR_0_HIGH);
9362 addr0_low = tr32(MAC_ADDR_0_LOW);
9363 addr1_high = tr32(MAC_ADDR_1_HIGH);
9364 addr1_low = tr32(MAC_ADDR_1_LOW);
9366 /* Skip MAC addr 1 if ASF is using it. */
9367 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9368 !(addr1_high == 0 && addr1_low == 0))
9371 spin_lock_bh(&tp->lock);
9372 __tg3_set_mac_addr(tp, skip_mac_1);
9373 __tg3_set_rx_mode(dev);
9374 spin_unlock_bh(&tp->lock);
9379 /* tp->lock is held. */
9380 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9381 dma_addr_t mapping, u32 maxlen_flags,
9385 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9386 ((u64) mapping >> 32));
9388 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9389 ((u64) mapping & 0xffffffff));
9391 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9394 if (!tg3_flag(tp, 5705_PLUS))
9396 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9401 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9405 if (!tg3_flag(tp, ENABLE_TSS)) {
9406 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9407 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9408 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9410 tw32(HOSTCC_TXCOL_TICKS, 0);
9411 tw32(HOSTCC_TXMAX_FRAMES, 0);
9412 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9414 for (; i < tp->txq_cnt; i++) {
9417 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9418 tw32(reg, ec->tx_coalesce_usecs);
9419 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9420 tw32(reg, ec->tx_max_coalesced_frames);
9421 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9422 tw32(reg, ec->tx_max_coalesced_frames_irq);
9426 for (; i < tp->irq_max - 1; i++) {
9427 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9428 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9429 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9433 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9436 u32 limit = tp->rxq_cnt;
9438 if (!tg3_flag(tp, ENABLE_RSS)) {
9439 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9440 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9441 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9444 tw32(HOSTCC_RXCOL_TICKS, 0);
9445 tw32(HOSTCC_RXMAX_FRAMES, 0);
9446 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9449 for (; i < limit; i++) {
9452 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9453 tw32(reg, ec->rx_coalesce_usecs);
9454 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9455 tw32(reg, ec->rx_max_coalesced_frames);
9456 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9457 tw32(reg, ec->rx_max_coalesced_frames_irq);
9460 for (; i < tp->irq_max - 1; i++) {
9461 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9462 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9463 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9467 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9469 tg3_coal_tx_init(tp, ec);
9470 tg3_coal_rx_init(tp, ec);
9472 if (!tg3_flag(tp, 5705_PLUS)) {
9473 u32 val = ec->stats_block_coalesce_usecs;
9475 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9476 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9481 tw32(HOSTCC_STAT_COAL_TICKS, val);
9485 /* tp->lock is held. */
9486 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9490 /* Disable all transmit rings but the first. */
9491 if (!tg3_flag(tp, 5705_PLUS))
9492 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9493 else if (tg3_flag(tp, 5717_PLUS))
9494 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9495 else if (tg3_flag(tp, 57765_CLASS) ||
9496 tg3_asic_rev(tp) == ASIC_REV_5762)
9497 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9499 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9502 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9503 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9504 BDINFO_FLAGS_DISABLED);
9507 /* tp->lock is held. */
9508 static void tg3_tx_rcbs_init(struct tg3 *tp)
9511 u32 txrcb = NIC_SRAM_SEND_RCB;
9513 if (tg3_flag(tp, ENABLE_TSS))
9516 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9517 struct tg3_napi *tnapi = &tp->napi[i];
9519 if (!tnapi->tx_ring)
9522 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9523 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9524 NIC_SRAM_TX_BUFFER_DESC);
9528 /* tp->lock is held. */
9529 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9533 /* Disable all receive return rings but the first. */
9534 if (tg3_flag(tp, 5717_PLUS))
9535 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9536 else if (!tg3_flag(tp, 5705_PLUS))
9537 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9538 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9539 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9540 tg3_flag(tp, 57765_CLASS))
9541 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9543 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9546 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9547 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9548 BDINFO_FLAGS_DISABLED);
9551 /* tp->lock is held. */
9552 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9555 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9557 if (tg3_flag(tp, ENABLE_RSS))
9560 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9561 struct tg3_napi *tnapi = &tp->napi[i];
9566 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9567 (tp->rx_ret_ring_mask + 1) <<
9568 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9572 /* tp->lock is held. */
9573 static void tg3_rings_reset(struct tg3 *tp)
9577 struct tg3_napi *tnapi = &tp->napi[0];
9579 tg3_tx_rcbs_disable(tp);
9581 tg3_rx_ret_rcbs_disable(tp);
9583 /* Disable interrupts */
9584 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9585 tp->napi[0].chk_msi_cnt = 0;
9586 tp->napi[0].last_rx_cons = 0;
9587 tp->napi[0].last_tx_cons = 0;
9589 /* Zero mailbox registers. */
9590 if (tg3_flag(tp, SUPPORT_MSIX)) {
9591 for (i = 1; i < tp->irq_max; i++) {
9592 tp->napi[i].tx_prod = 0;
9593 tp->napi[i].tx_cons = 0;
9594 if (tg3_flag(tp, ENABLE_TSS))
9595 tw32_mailbox(tp->napi[i].prodmbox, 0);
9596 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9597 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9598 tp->napi[i].chk_msi_cnt = 0;
9599 tp->napi[i].last_rx_cons = 0;
9600 tp->napi[i].last_tx_cons = 0;
9602 if (!tg3_flag(tp, ENABLE_TSS))
9603 tw32_mailbox(tp->napi[0].prodmbox, 0);
9605 tp->napi[0].tx_prod = 0;
9606 tp->napi[0].tx_cons = 0;
9607 tw32_mailbox(tp->napi[0].prodmbox, 0);
9608 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9611 /* Make sure the NIC-based send BD rings are disabled. */
9612 if (!tg3_flag(tp, 5705_PLUS)) {
9613 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9614 for (i = 0; i < 16; i++)
9615 tw32_tx_mbox(mbox + i * 8, 0);
9618 /* Clear status block in ram. */
9619 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9621 /* Set status block DMA address */
9622 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9623 ((u64) tnapi->status_mapping >> 32));
9624 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9625 ((u64) tnapi->status_mapping & 0xffffffff));
9627 stblk = HOSTCC_STATBLCK_RING1;
9629 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9630 u64 mapping = (u64)tnapi->status_mapping;
9631 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9632 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9635 /* Clear status block in ram. */
9636 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9639 tg3_tx_rcbs_init(tp);
9640 tg3_rx_ret_rcbs_init(tp);
9643 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9645 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9647 if (!tg3_flag(tp, 5750_PLUS) ||
9648 tg3_flag(tp, 5780_CLASS) ||
9649 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9650 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9651 tg3_flag(tp, 57765_PLUS))
9652 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9653 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9654 tg3_asic_rev(tp) == ASIC_REV_5787)
9655 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9657 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9659 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9660 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9662 val = min(nic_rep_thresh, host_rep_thresh);
9663 tw32(RCVBDI_STD_THRESH, val);
9665 if (tg3_flag(tp, 57765_PLUS))
9666 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9668 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9671 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9673 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9675 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9676 tw32(RCVBDI_JUMBO_THRESH, val);
9678 if (tg3_flag(tp, 57765_PLUS))
9679 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9682 static inline u32 calc_crc(unsigned char *buf, int len)
9690 for (j = 0; j < len; j++) {
9693 for (k = 0; k < 8; k++) {
9706 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9708 /* accept or reject all multicast frames */
9709 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9710 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9711 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9712 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9715 static void __tg3_set_rx_mode(struct net_device *dev)
9717 struct tg3 *tp = netdev_priv(dev);
9720 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9721 RX_MODE_KEEP_VLAN_TAG);
9723 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9724 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9727 if (!tg3_flag(tp, ENABLE_ASF))
9728 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9731 if (dev->flags & IFF_PROMISC) {
9732 /* Promiscuous mode. */
9733 rx_mode |= RX_MODE_PROMISC;
9734 } else if (dev->flags & IFF_ALLMULTI) {
9735 /* Accept all multicast. */
9736 tg3_set_multi(tp, 1);
9737 } else if (netdev_mc_empty(dev)) {
9738 /* Reject all multicast. */
9739 tg3_set_multi(tp, 0);
9741 /* Accept one or more multicast(s). */
9742 struct netdev_hw_addr *ha;
9743 u32 mc_filter[4] = { 0, };
9748 netdev_for_each_mc_addr(ha, dev) {
9749 crc = calc_crc(ha->addr, ETH_ALEN);
9751 regidx = (bit & 0x60) >> 5;
9753 mc_filter[regidx] |= (1 << bit);
9756 tw32(MAC_HASH_REG_0, mc_filter[0]);
9757 tw32(MAC_HASH_REG_1, mc_filter[1]);
9758 tw32(MAC_HASH_REG_2, mc_filter[2]);
9759 tw32(MAC_HASH_REG_3, mc_filter[3]);
9762 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9763 rx_mode |= RX_MODE_PROMISC;
9764 } else if (!(dev->flags & IFF_PROMISC)) {
9765 /* Add all entries into to the mac addr filter list */
9767 struct netdev_hw_addr *ha;
9769 netdev_for_each_uc_addr(ha, dev) {
9770 __tg3_set_one_mac_addr(tp, ha->addr,
9771 i + TG3_UCAST_ADDR_IDX(tp));
9776 if (rx_mode != tp->rx_mode) {
9777 tp->rx_mode = rx_mode;
9778 tw32_f(MAC_RX_MODE, rx_mode);
9783 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9787 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9788 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9791 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9795 if (!tg3_flag(tp, SUPPORT_MSIX))
9798 if (tp->rxq_cnt == 1) {
9799 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9803 /* Validate table against current IRQ count */
9804 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9805 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9809 if (i != TG3_RSS_INDIR_TBL_SIZE)
9810 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9813 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9816 u32 reg = MAC_RSS_INDIR_TBL_0;
9818 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9819 u32 val = tp->rss_ind_tbl[i];
9821 for (; i % 8; i++) {
9823 val |= tp->rss_ind_tbl[i];
9830 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9832 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9833 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9835 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9838 /* tp->lock is held. */
9839 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9841 u32 val, rdmac_mode;
9843 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9845 tg3_disable_ints(tp);
9849 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9851 if (tg3_flag(tp, INIT_COMPLETE))
9852 tg3_abort_hw(tp, 1);
9854 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9855 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9856 tg3_phy_pull_config(tp);
9857 tg3_eee_pull_config(tp, NULL);
9858 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9861 /* Enable MAC control of LPI */
9862 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9868 err = tg3_chip_reset(tp);
9872 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9874 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9875 val = tr32(TG3_CPMU_CTRL);
9876 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9877 tw32(TG3_CPMU_CTRL, val);
9879 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9880 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9881 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9882 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9884 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9885 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9886 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9887 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9889 val = tr32(TG3_CPMU_HST_ACC);
9890 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9891 val |= CPMU_HST_ACC_MACCLK_6_25;
9892 tw32(TG3_CPMU_HST_ACC, val);
9895 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9896 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9897 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9898 PCIE_PWR_MGMT_L1_THRESH_4MS;
9899 tw32(PCIE_PWR_MGMT_THRESH, val);
9901 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9902 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9904 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9906 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9907 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9910 if (tg3_flag(tp, L1PLLPD_EN)) {
9911 u32 grc_mode = tr32(GRC_MODE);
9913 /* Access the lower 1K of PL PCIE block registers. */
9914 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9915 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9917 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9918 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9919 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9921 tw32(GRC_MODE, grc_mode);
9924 if (tg3_flag(tp, 57765_CLASS)) {
9925 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9926 u32 grc_mode = tr32(GRC_MODE);
9928 /* Access the lower 1K of PL PCIE block registers. */
9929 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9930 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9932 val = tr32(TG3_PCIE_TLDLPL_PORT +
9933 TG3_PCIE_PL_LO_PHYCTL5);
9934 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9935 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9937 tw32(GRC_MODE, grc_mode);
9940 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9943 /* Fix transmit hangs */
9944 val = tr32(TG3_CPMU_PADRNG_CTL);
9945 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9946 tw32(TG3_CPMU_PADRNG_CTL, val);
9948 grc_mode = tr32(GRC_MODE);
9950 /* Access the lower 1K of DL PCIE block registers. */
9951 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9952 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9954 val = tr32(TG3_PCIE_TLDLPL_PORT +
9955 TG3_PCIE_DL_LO_FTSMAX);
9956 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9957 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9958 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9960 tw32(GRC_MODE, grc_mode);
9963 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9964 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9965 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9966 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9969 /* This works around an issue with Athlon chipsets on
9970 * B3 tigon3 silicon. This bit has no effect on any
9971 * other revision. But do not set this on PCI Express
9972 * chips and don't even touch the clocks if the CPMU is present.
9974 if (!tg3_flag(tp, CPMU_PRESENT)) {
9975 if (!tg3_flag(tp, PCI_EXPRESS))
9976 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9977 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9980 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9981 tg3_flag(tp, PCIX_MODE)) {
9982 val = tr32(TG3PCI_PCISTATE);
9983 val |= PCISTATE_RETRY_SAME_DMA;
9984 tw32(TG3PCI_PCISTATE, val);
9987 if (tg3_flag(tp, ENABLE_APE)) {
9988 /* Allow reads and writes to the
9989 * APE register and memory space.
9991 val = tr32(TG3PCI_PCISTATE);
9992 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9993 PCISTATE_ALLOW_APE_SHMEM_WR |
9994 PCISTATE_ALLOW_APE_PSPACE_WR;
9995 tw32(TG3PCI_PCISTATE, val);
9998 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9999 /* Enable some hw fixes. */
10000 val = tr32(TG3PCI_MSI_DATA);
10001 val |= (1 << 26) | (1 << 28) | (1 << 29);
10002 tw32(TG3PCI_MSI_DATA, val);
10005 /* Descriptor ring init may make accesses to the
10006 * NIC SRAM area to setup the TX descriptors, so we
10007 * can only do this after the hardware has been
10008 * successfully reset.
10010 err = tg3_init_rings(tp);
10014 if (tg3_flag(tp, 57765_PLUS)) {
10015 val = tr32(TG3PCI_DMA_RW_CTRL) &
10016 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10017 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10018 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10019 if (!tg3_flag(tp, 57765_CLASS) &&
10020 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10021 tg3_asic_rev(tp) != ASIC_REV_5762)
10022 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10023 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10024 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10025 tg3_asic_rev(tp) != ASIC_REV_5761) {
10026 /* This value is determined during the probe time DMA
10027 * engine test, tg3_test_dma.
10029 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10032 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10033 GRC_MODE_4X_NIC_SEND_RINGS |
10034 GRC_MODE_NO_TX_PHDR_CSUM |
10035 GRC_MODE_NO_RX_PHDR_CSUM);
10036 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10038 /* Pseudo-header checksum is done by hardware logic and not
10039 * the offload processers, so make the chip do the pseudo-
10040 * header checksums on receive. For transmit it is more
10041 * convenient to do the pseudo-header checksum in software
10042 * as Linux does that on transmit for us in all cases.
10044 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10046 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10048 tw32(TG3_RX_PTP_CTL,
10049 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10051 if (tg3_flag(tp, PTP_CAPABLE))
10052 val |= GRC_MODE_TIME_SYNC_ENABLE;
10054 tw32(GRC_MODE, tp->grc_mode | val);
10056 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10057 * south bridge limitation. As a workaround, Driver is setting MRRS
10058 * to 2048 instead of default 4096.
10060 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10061 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10062 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10063 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10066 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10067 val = tr32(GRC_MISC_CFG);
10069 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10070 tw32(GRC_MISC_CFG, val);
10072 /* Initialize MBUF/DESC pool. */
10073 if (tg3_flag(tp, 5750_PLUS)) {
10075 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10076 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10077 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10078 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10080 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10081 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10082 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10083 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10086 fw_len = tp->fw_len;
10087 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10088 tw32(BUFMGR_MB_POOL_ADDR,
10089 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10090 tw32(BUFMGR_MB_POOL_SIZE,
10091 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10094 if (tp->dev->mtu <= ETH_DATA_LEN) {
10095 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10096 tp->bufmgr_config.mbuf_read_dma_low_water);
10097 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10098 tp->bufmgr_config.mbuf_mac_rx_low_water);
10099 tw32(BUFMGR_MB_HIGH_WATER,
10100 tp->bufmgr_config.mbuf_high_water);
10102 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10103 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10104 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10105 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10106 tw32(BUFMGR_MB_HIGH_WATER,
10107 tp->bufmgr_config.mbuf_high_water_jumbo);
10109 tw32(BUFMGR_DMA_LOW_WATER,
10110 tp->bufmgr_config.dma_low_water);
10111 tw32(BUFMGR_DMA_HIGH_WATER,
10112 tp->bufmgr_config.dma_high_water);
10114 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10115 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10116 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10117 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10118 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10119 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10120 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10121 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10122 tw32(BUFMGR_MODE, val);
10123 for (i = 0; i < 2000; i++) {
10124 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10129 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10133 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10134 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10136 tg3_setup_rxbd_thresholds(tp);
10138 /* Initialize TG3_BDINFO's at:
10139 * RCVDBDI_STD_BD: standard eth size rx ring
10140 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10141 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10144 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10145 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10146 * ring attribute flags
10147 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10149 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10150 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10152 * The size of each ring is fixed in the firmware, but the location is
10155 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10156 ((u64) tpr->rx_std_mapping >> 32));
10157 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10158 ((u64) tpr->rx_std_mapping & 0xffffffff));
10159 if (!tg3_flag(tp, 5717_PLUS))
10160 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10161 NIC_SRAM_RX_BUFFER_DESC);
10163 /* Disable the mini ring */
10164 if (!tg3_flag(tp, 5705_PLUS))
10165 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10166 BDINFO_FLAGS_DISABLED);
10168 /* Program the jumbo buffer descriptor ring control
10169 * blocks on those devices that have them.
10171 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10172 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10174 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10175 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10176 ((u64) tpr->rx_jmb_mapping >> 32));
10177 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10178 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10179 val = TG3_RX_JMB_RING_SIZE(tp) <<
10180 BDINFO_FLAGS_MAXLEN_SHIFT;
10181 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10182 val | BDINFO_FLAGS_USE_EXT_RECV);
10183 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10184 tg3_flag(tp, 57765_CLASS) ||
10185 tg3_asic_rev(tp) == ASIC_REV_5762)
10186 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10187 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10189 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10190 BDINFO_FLAGS_DISABLED);
10193 if (tg3_flag(tp, 57765_PLUS)) {
10194 val = TG3_RX_STD_RING_SIZE(tp);
10195 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10196 val |= (TG3_RX_STD_DMA_SZ << 2);
10198 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10200 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10202 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10204 tpr->rx_std_prod_idx = tp->rx_pending;
10205 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10207 tpr->rx_jmb_prod_idx =
10208 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10209 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10211 tg3_rings_reset(tp);
10213 /* Initialize MAC address and backoff seed. */
10214 __tg3_set_mac_addr(tp, false);
10216 /* MTU + ethernet header + FCS + optional VLAN tag */
10217 tw32(MAC_RX_MTU_SIZE,
10218 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10220 /* The slot time is changed by tg3_setup_phy if we
10221 * run at gigabit with half duplex.
10223 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10224 (6 << TX_LENGTHS_IPG_SHIFT) |
10225 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10227 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10228 tg3_asic_rev(tp) == ASIC_REV_5762)
10229 val |= tr32(MAC_TX_LENGTHS) &
10230 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10231 TX_LENGTHS_CNT_DWN_VAL_MSK);
10233 tw32(MAC_TX_LENGTHS, val);
10235 /* Receive rules. */
10236 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10237 tw32(RCVLPC_CONFIG, 0x0181);
10239 /* Calculate RDMAC_MODE setting early, we need it to determine
10240 * the RCVLPC_STATE_ENABLE mask.
10242 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10243 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10244 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10245 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10246 RDMAC_MODE_LNGREAD_ENAB);
10248 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10249 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10251 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10252 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10253 tg3_asic_rev(tp) == ASIC_REV_57780)
10254 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10255 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10256 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10258 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10259 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10260 if (tg3_flag(tp, TSO_CAPABLE) &&
10261 tg3_asic_rev(tp) == ASIC_REV_5705) {
10262 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10263 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10264 !tg3_flag(tp, IS_5788)) {
10265 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10269 if (tg3_flag(tp, PCI_EXPRESS))
10270 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10272 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10274 if (tp->dev->mtu <= ETH_DATA_LEN) {
10275 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10276 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10280 if (tg3_flag(tp, HW_TSO_1) ||
10281 tg3_flag(tp, HW_TSO_2) ||
10282 tg3_flag(tp, HW_TSO_3))
10283 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10285 if (tg3_flag(tp, 57765_PLUS) ||
10286 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10287 tg3_asic_rev(tp) == ASIC_REV_57780)
10288 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10290 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10291 tg3_asic_rev(tp) == ASIC_REV_5762)
10292 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10294 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10295 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10296 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10297 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10298 tg3_flag(tp, 57765_PLUS)) {
10301 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10302 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10304 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10306 val = tr32(tgtreg);
10307 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10308 tg3_asic_rev(tp) == ASIC_REV_5762) {
10309 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10310 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10311 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10312 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10313 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10314 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10316 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10319 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10320 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10321 tg3_asic_rev(tp) == ASIC_REV_5762) {
10324 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10325 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10327 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10329 val = tr32(tgtreg);
10331 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10332 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10335 /* Receive/send statistics. */
10336 if (tg3_flag(tp, 5750_PLUS)) {
10337 val = tr32(RCVLPC_STATS_ENABLE);
10338 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10339 tw32(RCVLPC_STATS_ENABLE, val);
10340 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10341 tg3_flag(tp, TSO_CAPABLE)) {
10342 val = tr32(RCVLPC_STATS_ENABLE);
10343 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10344 tw32(RCVLPC_STATS_ENABLE, val);
10346 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10348 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10349 tw32(SNDDATAI_STATSENAB, 0xffffff);
10350 tw32(SNDDATAI_STATSCTRL,
10351 (SNDDATAI_SCTRL_ENABLE |
10352 SNDDATAI_SCTRL_FASTUPD));
10354 /* Setup host coalescing engine. */
10355 tw32(HOSTCC_MODE, 0);
10356 for (i = 0; i < 2000; i++) {
10357 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10362 __tg3_set_coalesce(tp, &tp->coal);
10364 if (!tg3_flag(tp, 5705_PLUS)) {
10365 /* Status/statistics block address. See tg3_timer,
10366 * the tg3_periodic_fetch_stats call there, and
10367 * tg3_get_stats to see how this works for 5705/5750 chips.
10369 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10370 ((u64) tp->stats_mapping >> 32));
10371 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10372 ((u64) tp->stats_mapping & 0xffffffff));
10373 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10375 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10377 /* Clear statistics and status block memory areas */
10378 for (i = NIC_SRAM_STATS_BLK;
10379 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10380 i += sizeof(u32)) {
10381 tg3_write_mem(tp, i, 0);
10386 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10388 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10389 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10390 if (!tg3_flag(tp, 5705_PLUS))
10391 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10393 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10394 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10395 /* reset to prevent losing 1st rx packet intermittently */
10396 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10400 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10401 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10402 MAC_MODE_FHDE_ENABLE;
10403 if (tg3_flag(tp, ENABLE_APE))
10404 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10405 if (!tg3_flag(tp, 5705_PLUS) &&
10406 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10407 tg3_asic_rev(tp) != ASIC_REV_5700)
10408 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10409 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10412 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10413 * If TG3_FLAG_IS_NIC is zero, we should read the
10414 * register to preserve the GPIO settings for LOMs. The GPIOs,
10415 * whether used as inputs or outputs, are set by boot code after
10418 if (!tg3_flag(tp, IS_NIC)) {
10421 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10422 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10423 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10425 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10426 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10427 GRC_LCLCTRL_GPIO_OUTPUT3;
10429 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10430 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10432 tp->grc_local_ctrl &= ~gpio_mask;
10433 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10435 /* GPIO1 must be driven high for eeprom write protect */
10436 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10437 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10438 GRC_LCLCTRL_GPIO_OUTPUT1);
10440 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10443 if (tg3_flag(tp, USING_MSIX)) {
10444 val = tr32(MSGINT_MODE);
10445 val |= MSGINT_MODE_ENABLE;
10446 if (tp->irq_cnt > 1)
10447 val |= MSGINT_MODE_MULTIVEC_EN;
10448 if (!tg3_flag(tp, 1SHOT_MSI))
10449 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10450 tw32(MSGINT_MODE, val);
10453 if (!tg3_flag(tp, 5705_PLUS)) {
10454 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10458 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10459 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10460 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10461 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10462 WDMAC_MODE_LNGREAD_ENAB);
10464 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10465 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10466 if (tg3_flag(tp, TSO_CAPABLE) &&
10467 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10468 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10470 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10471 !tg3_flag(tp, IS_5788)) {
10472 val |= WDMAC_MODE_RX_ACCEL;
10476 /* Enable host coalescing bug fix */
10477 if (tg3_flag(tp, 5755_PLUS))
10478 val |= WDMAC_MODE_STATUS_TAG_FIX;
10480 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10481 val |= WDMAC_MODE_BURST_ALL_DATA;
10483 tw32_f(WDMAC_MODE, val);
10486 if (tg3_flag(tp, PCIX_MODE)) {
10489 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10491 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10492 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10493 pcix_cmd |= PCI_X_CMD_READ_2K;
10494 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10495 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10496 pcix_cmd |= PCI_X_CMD_READ_2K;
10498 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10502 tw32_f(RDMAC_MODE, rdmac_mode);
10505 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10506 tg3_asic_rev(tp) == ASIC_REV_5720) {
10507 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10508 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10511 if (i < TG3_NUM_RDMA_CHANNELS) {
10512 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10513 val |= tg3_lso_rd_dma_workaround_bit(tp);
10514 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10515 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10519 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10520 if (!tg3_flag(tp, 5705_PLUS))
10521 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10523 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10524 tw32(SNDDATAC_MODE,
10525 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10527 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10529 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10530 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10531 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10532 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10533 val |= RCVDBDI_MODE_LRG_RING_SZ;
10534 tw32(RCVDBDI_MODE, val);
10535 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10536 if (tg3_flag(tp, HW_TSO_1) ||
10537 tg3_flag(tp, HW_TSO_2) ||
10538 tg3_flag(tp, HW_TSO_3))
10539 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10540 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10541 if (tg3_flag(tp, ENABLE_TSS))
10542 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10543 tw32(SNDBDI_MODE, val);
10544 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10546 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10547 err = tg3_load_5701_a0_firmware_fix(tp);
10552 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10553 /* Ignore any errors for the firmware download. If download
10554 * fails, the device will operate with EEE disabled
10556 tg3_load_57766_firmware(tp);
10559 if (tg3_flag(tp, TSO_CAPABLE)) {
10560 err = tg3_load_tso_firmware(tp);
10565 tp->tx_mode = TX_MODE_ENABLE;
10567 if (tg3_flag(tp, 5755_PLUS) ||
10568 tg3_asic_rev(tp) == ASIC_REV_5906)
10569 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10571 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10572 tg3_asic_rev(tp) == ASIC_REV_5762) {
10573 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10574 tp->tx_mode &= ~val;
10575 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10578 tw32_f(MAC_TX_MODE, tp->tx_mode);
10581 if (tg3_flag(tp, ENABLE_RSS)) {
10584 tg3_rss_write_indir_tbl(tp);
10586 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10588 for (i = 0; i < 10 ; i++)
10589 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10592 tp->rx_mode = RX_MODE_ENABLE;
10593 if (tg3_flag(tp, 5755_PLUS))
10594 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10596 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10597 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10599 if (tg3_flag(tp, ENABLE_RSS))
10600 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10601 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10602 RX_MODE_RSS_IPV6_HASH_EN |
10603 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10604 RX_MODE_RSS_IPV4_HASH_EN |
10605 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10607 tw32_f(MAC_RX_MODE, tp->rx_mode);
10610 tw32(MAC_LED_CTRL, tp->led_ctrl);
10612 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10613 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10614 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10617 tw32_f(MAC_RX_MODE, tp->rx_mode);
10620 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10621 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10622 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10623 /* Set drive transmission level to 1.2V */
10624 /* only if the signal pre-emphasis bit is not set */
10625 val = tr32(MAC_SERDES_CFG);
10628 tw32(MAC_SERDES_CFG, val);
10630 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10631 tw32(MAC_SERDES_CFG, 0x616000);
10634 /* Prevent chip from dropping frames when flow control
10637 if (tg3_flag(tp, 57765_CLASS))
10641 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10643 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10644 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10645 /* Use hardware link auto-negotiation */
10646 tg3_flag_set(tp, HW_AUTONEG);
10649 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10650 tg3_asic_rev(tp) == ASIC_REV_5714) {
10653 tmp = tr32(SERDES_RX_CTRL);
10654 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10655 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10656 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10657 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10660 if (!tg3_flag(tp, USE_PHYLIB)) {
10661 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10662 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10664 err = tg3_setup_phy(tp, false);
10668 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10669 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10672 /* Clear CRC stats. */
10673 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10674 tg3_writephy(tp, MII_TG3_TEST1,
10675 tmp | MII_TG3_TEST1_CRC_EN);
10676 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10681 __tg3_set_rx_mode(tp->dev);
10683 /* Initialize receive rules. */
10684 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10685 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10686 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10687 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10689 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10693 if (tg3_flag(tp, ENABLE_ASF))
10697 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10699 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10701 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10703 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10705 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10707 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10709 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10711 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10713 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10715 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10717 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10719 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10721 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10723 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10731 if (tg3_flag(tp, ENABLE_APE))
10732 /* Write our heartbeat update interval to APE. */
10733 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10734 APE_HOST_HEARTBEAT_INT_DISABLE);
10736 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10741 /* Called at device open time to get the chip ready for
10742 * packet processing. Invoked with tp->lock held.
10744 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10746 /* Chip may have been just powered on. If so, the boot code may still
10747 * be running initialization. Wait for it to finish to avoid races in
10748 * accessing the hardware.
10750 tg3_enable_register_access(tp);
10753 tg3_switch_clocks(tp);
10755 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10757 return tg3_reset_hw(tp, reset_phy);
10760 #ifdef CONFIG_TIGON3_HWMON
10761 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10765 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10766 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10768 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10771 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10772 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10773 memset(ocir, 0, TG3_OCIR_LEN);
10777 /* sysfs attributes for hwmon */
10778 static ssize_t tg3_show_temp(struct device *dev,
10779 struct device_attribute *devattr, char *buf)
10781 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10782 struct tg3 *tp = dev_get_drvdata(dev);
10785 spin_lock_bh(&tp->lock);
10786 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10787 sizeof(temperature));
10788 spin_unlock_bh(&tp->lock);
10789 return sprintf(buf, "%u\n", temperature * 1000);
10793 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10794 TG3_TEMP_SENSOR_OFFSET);
10795 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10796 TG3_TEMP_CAUTION_OFFSET);
10797 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10798 TG3_TEMP_MAX_OFFSET);
10800 static struct attribute *tg3_attrs[] = {
10801 &sensor_dev_attr_temp1_input.dev_attr.attr,
10802 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10803 &sensor_dev_attr_temp1_max.dev_attr.attr,
10806 ATTRIBUTE_GROUPS(tg3);
10808 static void tg3_hwmon_close(struct tg3 *tp)
10810 if (tp->hwmon_dev) {
10811 hwmon_device_unregister(tp->hwmon_dev);
10812 tp->hwmon_dev = NULL;
10816 static void tg3_hwmon_open(struct tg3 *tp)
10820 struct pci_dev *pdev = tp->pdev;
10821 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10823 tg3_sd_scan_scratchpad(tp, ocirs);
10825 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10826 if (!ocirs[i].src_data_length)
10829 size += ocirs[i].src_hdr_length;
10830 size += ocirs[i].src_data_length;
10836 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10838 if (IS_ERR(tp->hwmon_dev)) {
10839 tp->hwmon_dev = NULL;
10840 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10844 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10845 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10846 #endif /* CONFIG_TIGON3_HWMON */
10849 #define TG3_STAT_ADD32(PSTAT, REG) \
10850 do { u32 __val = tr32(REG); \
10851 (PSTAT)->low += __val; \
10852 if ((PSTAT)->low < __val) \
10853 (PSTAT)->high += 1; \
10856 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10858 struct tg3_hw_stats *sp = tp->hw_stats;
10863 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10864 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10865 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10866 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10867 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10868 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10869 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10870 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10871 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10872 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10873 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10874 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10875 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10876 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10877 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10878 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10881 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10882 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10883 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10884 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10887 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10888 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10889 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10890 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10891 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10892 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10893 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10894 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10895 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10896 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10897 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10898 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10899 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10900 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10902 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10903 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10904 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10905 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10906 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10907 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10909 u32 val = tr32(HOSTCC_FLOW_ATTN);
10910 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10912 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10913 sp->rx_discards.low += val;
10914 if (sp->rx_discards.low < val)
10915 sp->rx_discards.high += 1;
10917 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10919 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10922 static void tg3_chk_missed_msi(struct tg3 *tp)
10926 for (i = 0; i < tp->irq_cnt; i++) {
10927 struct tg3_napi *tnapi = &tp->napi[i];
10929 if (tg3_has_work(tnapi)) {
10930 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10931 tnapi->last_tx_cons == tnapi->tx_cons) {
10932 if (tnapi->chk_msi_cnt < 1) {
10933 tnapi->chk_msi_cnt++;
10939 tnapi->chk_msi_cnt = 0;
10940 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10941 tnapi->last_tx_cons = tnapi->tx_cons;
10945 static void tg3_timer(unsigned long __opaque)
10947 struct tg3 *tp = (struct tg3 *) __opaque;
10949 spin_lock(&tp->lock);
10951 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10952 spin_unlock(&tp->lock);
10953 goto restart_timer;
10956 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10957 tg3_flag(tp, 57765_CLASS))
10958 tg3_chk_missed_msi(tp);
10960 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10961 /* BCM4785: Flush posted writes from GbE to host memory. */
10965 if (!tg3_flag(tp, TAGGED_STATUS)) {
10966 /* All of this garbage is because when using non-tagged
10967 * IRQ status the mailbox/status_block protocol the chip
10968 * uses with the cpu is race prone.
10970 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10971 tw32(GRC_LOCAL_CTRL,
10972 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10974 tw32(HOSTCC_MODE, tp->coalesce_mode |
10975 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10978 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10979 spin_unlock(&tp->lock);
10980 tg3_reset_task_schedule(tp);
10981 goto restart_timer;
10985 /* This part only runs once per second. */
10986 if (!--tp->timer_counter) {
10987 if (tg3_flag(tp, 5705_PLUS))
10988 tg3_periodic_fetch_stats(tp);
10990 if (tp->setlpicnt && !--tp->setlpicnt)
10991 tg3_phy_eee_enable(tp);
10993 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10997 mac_stat = tr32(MAC_STATUS);
11000 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11001 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11003 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11007 tg3_setup_phy(tp, false);
11008 } else if (tg3_flag(tp, POLL_SERDES)) {
11009 u32 mac_stat = tr32(MAC_STATUS);
11010 int need_setup = 0;
11013 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11016 if (!tp->link_up &&
11017 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11018 MAC_STATUS_SIGNAL_DET))) {
11022 if (!tp->serdes_counter) {
11025 ~MAC_MODE_PORT_MODE_MASK));
11027 tw32_f(MAC_MODE, tp->mac_mode);
11030 tg3_setup_phy(tp, false);
11032 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11033 tg3_flag(tp, 5780_CLASS)) {
11034 tg3_serdes_parallel_detect(tp);
11035 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11036 u32 cpmu = tr32(TG3_CPMU_STATUS);
11037 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11038 TG3_CPMU_STATUS_LINK_MASK);
11040 if (link_up != tp->link_up)
11041 tg3_setup_phy(tp, false);
11044 tp->timer_counter = tp->timer_multiplier;
11047 /* Heartbeat is only sent once every 2 seconds.
11049 * The heartbeat is to tell the ASF firmware that the host
11050 * driver is still alive. In the event that the OS crashes,
11051 * ASF needs to reset the hardware to free up the FIFO space
11052 * that may be filled with rx packets destined for the host.
11053 * If the FIFO is full, ASF will no longer function properly.
11055 * Unintended resets have been reported on real time kernels
11056 * where the timer doesn't run on time. Netpoll will also have
11059 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11060 * to check the ring condition when the heartbeat is expiring
11061 * before doing the reset. This will prevent most unintended
11064 if (!--tp->asf_counter) {
11065 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11066 tg3_wait_for_event_ack(tp);
11068 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11069 FWCMD_NICDRV_ALIVE3);
11070 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11071 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11072 TG3_FW_UPDATE_TIMEOUT_SEC);
11074 tg3_generate_fw_event(tp);
11076 tp->asf_counter = tp->asf_multiplier;
11079 spin_unlock(&tp->lock);
11082 tp->timer.expires = jiffies + tp->timer_offset;
11083 add_timer(&tp->timer);
11086 static void tg3_timer_init(struct tg3 *tp)
11088 if (tg3_flag(tp, TAGGED_STATUS) &&
11089 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11090 !tg3_flag(tp, 57765_CLASS))
11091 tp->timer_offset = HZ;
11093 tp->timer_offset = HZ / 10;
11095 BUG_ON(tp->timer_offset > HZ);
11097 tp->timer_multiplier = (HZ / tp->timer_offset);
11098 tp->asf_multiplier = (HZ / tp->timer_offset) *
11099 TG3_FW_UPDATE_FREQ_SEC;
11101 init_timer(&tp->timer);
11102 tp->timer.data = (unsigned long) tp;
11103 tp->timer.function = tg3_timer;
11106 static void tg3_timer_start(struct tg3 *tp)
11108 tp->asf_counter = tp->asf_multiplier;
11109 tp->timer_counter = tp->timer_multiplier;
11111 tp->timer.expires = jiffies + tp->timer_offset;
11112 add_timer(&tp->timer);
11115 static void tg3_timer_stop(struct tg3 *tp)
11117 del_timer_sync(&tp->timer);
11120 /* Restart hardware after configuration changes, self-test, etc.
11121 * Invoked with tp->lock held.
11123 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11124 __releases(tp->lock)
11125 __acquires(tp->lock)
11129 err = tg3_init_hw(tp, reset_phy);
11131 netdev_err(tp->dev,
11132 "Failed to re-initialize device, aborting\n");
11133 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11134 tg3_full_unlock(tp);
11135 tg3_timer_stop(tp);
11137 tg3_napi_enable(tp);
11138 dev_close(tp->dev);
11139 tg3_full_lock(tp, 0);
11144 static void tg3_reset_task(struct work_struct *work)
11146 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11150 tg3_full_lock(tp, 0);
11152 if (!netif_running(tp->dev)) {
11153 tg3_flag_clear(tp, RESET_TASK_PENDING);
11154 tg3_full_unlock(tp);
11159 tg3_full_unlock(tp);
11163 tg3_netif_stop(tp);
11165 tg3_full_lock(tp, 1);
11167 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11168 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11169 tp->write32_rx_mbox = tg3_write_flush_reg32;
11170 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11171 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11174 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11175 err = tg3_init_hw(tp, true);
11177 tg3_full_unlock(tp);
11179 tg3_napi_enable(tp);
11180 /* Clear this flag so that tg3_reset_task_cancel() will not
11181 * call cancel_work_sync() and wait forever.
11183 tg3_flag_clear(tp, RESET_TASK_PENDING);
11184 dev_close(tp->dev);
11188 tg3_netif_start(tp);
11190 tg3_full_unlock(tp);
11195 tg3_flag_clear(tp, RESET_TASK_PENDING);
11200 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11203 unsigned long flags;
11205 struct tg3_napi *tnapi = &tp->napi[irq_num];
11207 if (tp->irq_cnt == 1)
11208 name = tp->dev->name;
11210 name = &tnapi->irq_lbl[0];
11211 if (tnapi->tx_buffers && tnapi->rx_rcb)
11212 snprintf(name, IFNAMSIZ,
11213 "%s-txrx-%d", tp->dev->name, irq_num);
11214 else if (tnapi->tx_buffers)
11215 snprintf(name, IFNAMSIZ,
11216 "%s-tx-%d", tp->dev->name, irq_num);
11217 else if (tnapi->rx_rcb)
11218 snprintf(name, IFNAMSIZ,
11219 "%s-rx-%d", tp->dev->name, irq_num);
11221 snprintf(name, IFNAMSIZ,
11222 "%s-%d", tp->dev->name, irq_num);
11223 name[IFNAMSIZ-1] = 0;
11226 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11228 if (tg3_flag(tp, 1SHOT_MSI))
11229 fn = tg3_msi_1shot;
11232 fn = tg3_interrupt;
11233 if (tg3_flag(tp, TAGGED_STATUS))
11234 fn = tg3_interrupt_tagged;
11235 flags = IRQF_SHARED;
11238 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11241 static int tg3_test_interrupt(struct tg3 *tp)
11243 struct tg3_napi *tnapi = &tp->napi[0];
11244 struct net_device *dev = tp->dev;
11245 int err, i, intr_ok = 0;
11248 if (!netif_running(dev))
11251 tg3_disable_ints(tp);
11253 free_irq(tnapi->irq_vec, tnapi);
11256 * Turn off MSI one shot mode. Otherwise this test has no
11257 * observable way to know whether the interrupt was delivered.
11259 if (tg3_flag(tp, 57765_PLUS)) {
11260 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11261 tw32(MSGINT_MODE, val);
11264 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11265 IRQF_SHARED, dev->name, tnapi);
11269 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11270 tg3_enable_ints(tp);
11272 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11275 for (i = 0; i < 5; i++) {
11276 u32 int_mbox, misc_host_ctrl;
11278 int_mbox = tr32_mailbox(tnapi->int_mbox);
11279 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11281 if ((int_mbox != 0) ||
11282 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11287 if (tg3_flag(tp, 57765_PLUS) &&
11288 tnapi->hw_status->status_tag != tnapi->last_tag)
11289 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11294 tg3_disable_ints(tp);
11296 free_irq(tnapi->irq_vec, tnapi);
11298 err = tg3_request_irq(tp, 0);
11304 /* Reenable MSI one shot mode. */
11305 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11306 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11307 tw32(MSGINT_MODE, val);
11315 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11316 * successfully restored
11318 static int tg3_test_msi(struct tg3 *tp)
11323 if (!tg3_flag(tp, USING_MSI))
11326 /* Turn off SERR reporting in case MSI terminates with Master
11329 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11330 pci_write_config_word(tp->pdev, PCI_COMMAND,
11331 pci_cmd & ~PCI_COMMAND_SERR);
11333 err = tg3_test_interrupt(tp);
11335 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11340 /* other failures */
11344 /* MSI test failed, go back to INTx mode */
11345 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11346 "to INTx mode. Please report this failure to the PCI "
11347 "maintainer and include system chipset information\n");
11349 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11351 pci_disable_msi(tp->pdev);
11353 tg3_flag_clear(tp, USING_MSI);
11354 tp->napi[0].irq_vec = tp->pdev->irq;
11356 err = tg3_request_irq(tp, 0);
11360 /* Need to reset the chip because the MSI cycle may have terminated
11361 * with Master Abort.
11363 tg3_full_lock(tp, 1);
11365 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11366 err = tg3_init_hw(tp, true);
11368 tg3_full_unlock(tp);
11371 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11376 static int tg3_request_firmware(struct tg3 *tp)
11378 const struct tg3_firmware_hdr *fw_hdr;
11380 if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11381 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11386 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11388 /* Firmware blob starts with version numbers, followed by
11389 * start address and _full_ length including BSS sections
11390 * (which must be longer than the actual data, of course
11393 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11394 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11395 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11396 tp->fw_len, tp->fw_needed);
11397 release_firmware(tp->fw);
11402 /* We no longer need firmware; we have it. */
11403 tp->fw_needed = NULL;
11407 static u32 tg3_irq_count(struct tg3 *tp)
11409 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11412 /* We want as many rx rings enabled as there are cpus.
11413 * In multiqueue MSI-X mode, the first MSI-X vector
11414 * only deals with link interrupts, etc, so we add
11415 * one to the number of vectors we are requesting.
11417 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11423 static bool tg3_enable_msix(struct tg3 *tp)
11426 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11428 tp->txq_cnt = tp->txq_req;
11429 tp->rxq_cnt = tp->rxq_req;
11431 tp->rxq_cnt = netif_get_num_default_rss_queues();
11432 if (tp->rxq_cnt > tp->rxq_max)
11433 tp->rxq_cnt = tp->rxq_max;
11435 /* Disable multiple TX rings by default. Simple round-robin hardware
11436 * scheduling of the TX rings can cause starvation of rings with
11437 * small packets when other rings have TSO or jumbo packets.
11442 tp->irq_cnt = tg3_irq_count(tp);
11444 for (i = 0; i < tp->irq_max; i++) {
11445 msix_ent[i].entry = i;
11446 msix_ent[i].vector = 0;
11449 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11452 } else if (rc < tp->irq_cnt) {
11453 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11456 tp->rxq_cnt = max(rc - 1, 1);
11458 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11461 for (i = 0; i < tp->irq_max; i++)
11462 tp->napi[i].irq_vec = msix_ent[i].vector;
11464 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11465 pci_disable_msix(tp->pdev);
11469 if (tp->irq_cnt == 1)
11472 tg3_flag_set(tp, ENABLE_RSS);
11474 if (tp->txq_cnt > 1)
11475 tg3_flag_set(tp, ENABLE_TSS);
11477 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11482 static void tg3_ints_init(struct tg3 *tp)
11484 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11485 !tg3_flag(tp, TAGGED_STATUS)) {
11486 /* All MSI supporting chips should support tagged
11487 * status. Assert that this is the case.
11489 netdev_warn(tp->dev,
11490 "MSI without TAGGED_STATUS? Not using MSI\n");
11494 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11495 tg3_flag_set(tp, USING_MSIX);
11496 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11497 tg3_flag_set(tp, USING_MSI);
11499 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11500 u32 msi_mode = tr32(MSGINT_MODE);
11501 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11502 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11503 if (!tg3_flag(tp, 1SHOT_MSI))
11504 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11505 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11508 if (!tg3_flag(tp, USING_MSIX)) {
11510 tp->napi[0].irq_vec = tp->pdev->irq;
11513 if (tp->irq_cnt == 1) {
11516 netif_set_real_num_tx_queues(tp->dev, 1);
11517 netif_set_real_num_rx_queues(tp->dev, 1);
11521 static void tg3_ints_fini(struct tg3 *tp)
11523 if (tg3_flag(tp, USING_MSIX))
11524 pci_disable_msix(tp->pdev);
11525 else if (tg3_flag(tp, USING_MSI))
11526 pci_disable_msi(tp->pdev);
11527 tg3_flag_clear(tp, USING_MSI);
11528 tg3_flag_clear(tp, USING_MSIX);
11529 tg3_flag_clear(tp, ENABLE_RSS);
11530 tg3_flag_clear(tp, ENABLE_TSS);
11533 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11536 struct net_device *dev = tp->dev;
11540 * Setup interrupts first so we know how
11541 * many NAPI resources to allocate
11545 tg3_rss_check_indir_tbl(tp);
11547 /* The placement of this call is tied
11548 * to the setup and use of Host TX descriptors.
11550 err = tg3_alloc_consistent(tp);
11552 goto out_ints_fini;
11556 tg3_napi_enable(tp);
11558 for (i = 0; i < tp->irq_cnt; i++) {
11559 err = tg3_request_irq(tp, i);
11561 for (i--; i >= 0; i--) {
11562 struct tg3_napi *tnapi = &tp->napi[i];
11564 free_irq(tnapi->irq_vec, tnapi);
11566 goto out_napi_fini;
11570 tg3_full_lock(tp, 0);
11573 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11575 err = tg3_init_hw(tp, reset_phy);
11577 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11578 tg3_free_rings(tp);
11581 tg3_full_unlock(tp);
11586 if (test_irq && tg3_flag(tp, USING_MSI)) {
11587 err = tg3_test_msi(tp);
11590 tg3_full_lock(tp, 0);
11591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11592 tg3_free_rings(tp);
11593 tg3_full_unlock(tp);
11595 goto out_napi_fini;
11598 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11599 u32 val = tr32(PCIE_TRANSACTION_CFG);
11601 tw32(PCIE_TRANSACTION_CFG,
11602 val | PCIE_TRANS_CFG_1SHOT_MSI);
11608 tg3_hwmon_open(tp);
11610 tg3_full_lock(tp, 0);
11612 tg3_timer_start(tp);
11613 tg3_flag_set(tp, INIT_COMPLETE);
11614 tg3_enable_ints(tp);
11616 tg3_ptp_resume(tp);
11618 tg3_full_unlock(tp);
11620 netif_tx_start_all_queues(dev);
11623 * Reset loopback feature if it was turned on while the device was down
11624 * make sure that it's installed properly now.
11626 if (dev->features & NETIF_F_LOOPBACK)
11627 tg3_set_loopback(dev, dev->features);
11632 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11633 struct tg3_napi *tnapi = &tp->napi[i];
11634 free_irq(tnapi->irq_vec, tnapi);
11638 tg3_napi_disable(tp);
11640 tg3_free_consistent(tp);
11648 static void tg3_stop(struct tg3 *tp)
11652 tg3_reset_task_cancel(tp);
11653 tg3_netif_stop(tp);
11655 tg3_timer_stop(tp);
11657 tg3_hwmon_close(tp);
11661 tg3_full_lock(tp, 1);
11663 tg3_disable_ints(tp);
11665 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11666 tg3_free_rings(tp);
11667 tg3_flag_clear(tp, INIT_COMPLETE);
11669 tg3_full_unlock(tp);
11671 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11672 struct tg3_napi *tnapi = &tp->napi[i];
11673 free_irq(tnapi->irq_vec, tnapi);
11680 tg3_free_consistent(tp);
11683 static int tg3_open(struct net_device *dev)
11685 struct tg3 *tp = netdev_priv(dev);
11688 if (tp->pcierr_recovery) {
11689 netdev_err(dev, "Failed to open device. PCI error recovery "
11694 if (tp->fw_needed) {
11695 err = tg3_request_firmware(tp);
11696 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11698 netdev_warn(tp->dev, "EEE capability disabled\n");
11699 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11700 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11701 netdev_warn(tp->dev, "EEE capability restored\n");
11702 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11704 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11708 netdev_warn(tp->dev, "TSO capability disabled\n");
11709 tg3_flag_clear(tp, TSO_CAPABLE);
11710 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11711 netdev_notice(tp->dev, "TSO capability restored\n");
11712 tg3_flag_set(tp, TSO_CAPABLE);
11716 tg3_carrier_off(tp);
11718 err = tg3_power_up(tp);
11722 tg3_full_lock(tp, 0);
11724 tg3_disable_ints(tp);
11725 tg3_flag_clear(tp, INIT_COMPLETE);
11727 tg3_full_unlock(tp);
11729 err = tg3_start(tp,
11730 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11733 tg3_frob_aux_power(tp, false);
11734 pci_set_power_state(tp->pdev, PCI_D3hot);
11740 static int tg3_close(struct net_device *dev)
11742 struct tg3 *tp = netdev_priv(dev);
11744 if (tp->pcierr_recovery) {
11745 netdev_err(dev, "Failed to close device. PCI error recovery "
11752 if (pci_device_is_present(tp->pdev)) {
11753 tg3_power_down_prepare(tp);
11755 tg3_carrier_off(tp);
11760 static inline u64 get_stat64(tg3_stat64_t *val)
11762 return ((u64)val->high << 32) | ((u64)val->low);
11765 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11767 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11769 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11770 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11771 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11774 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11775 tg3_writephy(tp, MII_TG3_TEST1,
11776 val | MII_TG3_TEST1_CRC_EN);
11777 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11781 tp->phy_crc_errors += val;
11783 return tp->phy_crc_errors;
11786 return get_stat64(&hw_stats->rx_fcs_errors);
11789 #define ESTAT_ADD(member) \
11790 estats->member = old_estats->member + \
11791 get_stat64(&hw_stats->member)
11793 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11795 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11796 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11798 ESTAT_ADD(rx_octets);
11799 ESTAT_ADD(rx_fragments);
11800 ESTAT_ADD(rx_ucast_packets);
11801 ESTAT_ADD(rx_mcast_packets);
11802 ESTAT_ADD(rx_bcast_packets);
11803 ESTAT_ADD(rx_fcs_errors);
11804 ESTAT_ADD(rx_align_errors);
11805 ESTAT_ADD(rx_xon_pause_rcvd);
11806 ESTAT_ADD(rx_xoff_pause_rcvd);
11807 ESTAT_ADD(rx_mac_ctrl_rcvd);
11808 ESTAT_ADD(rx_xoff_entered);
11809 ESTAT_ADD(rx_frame_too_long_errors);
11810 ESTAT_ADD(rx_jabbers);
11811 ESTAT_ADD(rx_undersize_packets);
11812 ESTAT_ADD(rx_in_length_errors);
11813 ESTAT_ADD(rx_out_length_errors);
11814 ESTAT_ADD(rx_64_or_less_octet_packets);
11815 ESTAT_ADD(rx_65_to_127_octet_packets);
11816 ESTAT_ADD(rx_128_to_255_octet_packets);
11817 ESTAT_ADD(rx_256_to_511_octet_packets);
11818 ESTAT_ADD(rx_512_to_1023_octet_packets);
11819 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11820 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11821 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11822 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11823 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11825 ESTAT_ADD(tx_octets);
11826 ESTAT_ADD(tx_collisions);
11827 ESTAT_ADD(tx_xon_sent);
11828 ESTAT_ADD(tx_xoff_sent);
11829 ESTAT_ADD(tx_flow_control);
11830 ESTAT_ADD(tx_mac_errors);
11831 ESTAT_ADD(tx_single_collisions);
11832 ESTAT_ADD(tx_mult_collisions);
11833 ESTAT_ADD(tx_deferred);
11834 ESTAT_ADD(tx_excessive_collisions);
11835 ESTAT_ADD(tx_late_collisions);
11836 ESTAT_ADD(tx_collide_2times);
11837 ESTAT_ADD(tx_collide_3times);
11838 ESTAT_ADD(tx_collide_4times);
11839 ESTAT_ADD(tx_collide_5times);
11840 ESTAT_ADD(tx_collide_6times);
11841 ESTAT_ADD(tx_collide_7times);
11842 ESTAT_ADD(tx_collide_8times);
11843 ESTAT_ADD(tx_collide_9times);
11844 ESTAT_ADD(tx_collide_10times);
11845 ESTAT_ADD(tx_collide_11times);
11846 ESTAT_ADD(tx_collide_12times);
11847 ESTAT_ADD(tx_collide_13times);
11848 ESTAT_ADD(tx_collide_14times);
11849 ESTAT_ADD(tx_collide_15times);
11850 ESTAT_ADD(tx_ucast_packets);
11851 ESTAT_ADD(tx_mcast_packets);
11852 ESTAT_ADD(tx_bcast_packets);
11853 ESTAT_ADD(tx_carrier_sense_errors);
11854 ESTAT_ADD(tx_discards);
11855 ESTAT_ADD(tx_errors);
11857 ESTAT_ADD(dma_writeq_full);
11858 ESTAT_ADD(dma_write_prioq_full);
11859 ESTAT_ADD(rxbds_empty);
11860 ESTAT_ADD(rx_discards);
11861 ESTAT_ADD(rx_errors);
11862 ESTAT_ADD(rx_threshold_hit);
11864 ESTAT_ADD(dma_readq_full);
11865 ESTAT_ADD(dma_read_prioq_full);
11866 ESTAT_ADD(tx_comp_queue_full);
11868 ESTAT_ADD(ring_set_send_prod_index);
11869 ESTAT_ADD(ring_status_update);
11870 ESTAT_ADD(nic_irqs);
11871 ESTAT_ADD(nic_avoided_irqs);
11872 ESTAT_ADD(nic_tx_threshold_hit);
11874 ESTAT_ADD(mbuf_lwm_thresh_hit);
11877 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11879 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11880 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11882 stats->rx_packets = old_stats->rx_packets +
11883 get_stat64(&hw_stats->rx_ucast_packets) +
11884 get_stat64(&hw_stats->rx_mcast_packets) +
11885 get_stat64(&hw_stats->rx_bcast_packets);
11887 stats->tx_packets = old_stats->tx_packets +
11888 get_stat64(&hw_stats->tx_ucast_packets) +
11889 get_stat64(&hw_stats->tx_mcast_packets) +
11890 get_stat64(&hw_stats->tx_bcast_packets);
11892 stats->rx_bytes = old_stats->rx_bytes +
11893 get_stat64(&hw_stats->rx_octets);
11894 stats->tx_bytes = old_stats->tx_bytes +
11895 get_stat64(&hw_stats->tx_octets);
11897 stats->rx_errors = old_stats->rx_errors +
11898 get_stat64(&hw_stats->rx_errors);
11899 stats->tx_errors = old_stats->tx_errors +
11900 get_stat64(&hw_stats->tx_errors) +
11901 get_stat64(&hw_stats->tx_mac_errors) +
11902 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11903 get_stat64(&hw_stats->tx_discards);
11905 stats->multicast = old_stats->multicast +
11906 get_stat64(&hw_stats->rx_mcast_packets);
11907 stats->collisions = old_stats->collisions +
11908 get_stat64(&hw_stats->tx_collisions);
11910 stats->rx_length_errors = old_stats->rx_length_errors +
11911 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11912 get_stat64(&hw_stats->rx_undersize_packets);
11914 stats->rx_frame_errors = old_stats->rx_frame_errors +
11915 get_stat64(&hw_stats->rx_align_errors);
11916 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11917 get_stat64(&hw_stats->tx_discards);
11918 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11919 get_stat64(&hw_stats->tx_carrier_sense_errors);
11921 stats->rx_crc_errors = old_stats->rx_crc_errors +
11922 tg3_calc_crc_errors(tp);
11924 stats->rx_missed_errors = old_stats->rx_missed_errors +
11925 get_stat64(&hw_stats->rx_discards);
11927 stats->rx_dropped = tp->rx_dropped;
11928 stats->tx_dropped = tp->tx_dropped;
11931 static int tg3_get_regs_len(struct net_device *dev)
11933 return TG3_REG_BLK_SIZE;
11936 static void tg3_get_regs(struct net_device *dev,
11937 struct ethtool_regs *regs, void *_p)
11939 struct tg3 *tp = netdev_priv(dev);
11943 memset(_p, 0, TG3_REG_BLK_SIZE);
11945 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11948 tg3_full_lock(tp, 0);
11950 tg3_dump_legacy_regs(tp, (u32 *)_p);
11952 tg3_full_unlock(tp);
11955 static int tg3_get_eeprom_len(struct net_device *dev)
11957 struct tg3 *tp = netdev_priv(dev);
11959 return tp->nvram_size;
11962 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11964 struct tg3 *tp = netdev_priv(dev);
11965 int ret, cpmu_restore = 0;
11967 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11970 if (tg3_flag(tp, NO_NVRAM))
11973 offset = eeprom->offset;
11977 eeprom->magic = TG3_EEPROM_MAGIC;
11979 /* Override clock, link aware and link idle modes */
11980 if (tg3_flag(tp, CPMU_PRESENT)) {
11981 cpmu_val = tr32(TG3_CPMU_CTRL);
11982 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11983 CPMU_CTRL_LINK_IDLE_MODE)) {
11984 tw32(TG3_CPMU_CTRL, cpmu_val &
11985 ~(CPMU_CTRL_LINK_AWARE_MODE |
11986 CPMU_CTRL_LINK_IDLE_MODE));
11990 tg3_override_clk(tp);
11993 /* adjustments to start on required 4 byte boundary */
11994 b_offset = offset & 3;
11995 b_count = 4 - b_offset;
11996 if (b_count > len) {
11997 /* i.e. offset=1 len=2 */
12000 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12003 memcpy(data, ((char *)&val) + b_offset, b_count);
12006 eeprom->len += b_count;
12009 /* read bytes up to the last 4 byte boundary */
12010 pd = &data[eeprom->len];
12011 for (i = 0; i < (len - (len & 3)); i += 4) {
12012 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12019 memcpy(pd + i, &val, 4);
12020 if (need_resched()) {
12021 if (signal_pending(current)) {
12032 /* read last bytes not ending on 4 byte boundary */
12033 pd = &data[eeprom->len];
12035 b_offset = offset + len - b_count;
12036 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12039 memcpy(pd, &val, b_count);
12040 eeprom->len += b_count;
12045 /* Restore clock, link aware and link idle modes */
12046 tg3_restore_clk(tp);
12048 tw32(TG3_CPMU_CTRL, cpmu_val);
12053 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12055 struct tg3 *tp = netdev_priv(dev);
12057 u32 offset, len, b_offset, odd_len;
12059 __be32 start = 0, end;
12061 if (tg3_flag(tp, NO_NVRAM) ||
12062 eeprom->magic != TG3_EEPROM_MAGIC)
12065 offset = eeprom->offset;
12068 if ((b_offset = (offset & 3))) {
12069 /* adjustments to start on required 4 byte boundary */
12070 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12081 /* adjustments to end on required 4 byte boundary */
12083 len = (len + 3) & ~3;
12084 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12090 if (b_offset || odd_len) {
12091 buf = kmalloc(len, GFP_KERNEL);
12095 memcpy(buf, &start, 4);
12097 memcpy(buf+len-4, &end, 4);
12098 memcpy(buf + b_offset, data, eeprom->len);
12101 ret = tg3_nvram_write_block(tp, offset, len, buf);
12109 static int tg3_get_link_ksettings(struct net_device *dev,
12110 struct ethtool_link_ksettings *cmd)
12112 struct tg3 *tp = netdev_priv(dev);
12113 u32 supported, advertising;
12115 if (tg3_flag(tp, USE_PHYLIB)) {
12116 struct phy_device *phydev;
12117 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12119 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12120 phy_ethtool_ksettings_get(phydev, cmd);
12125 supported = (SUPPORTED_Autoneg);
12127 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12128 supported |= (SUPPORTED_1000baseT_Half |
12129 SUPPORTED_1000baseT_Full);
12131 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12132 supported |= (SUPPORTED_100baseT_Half |
12133 SUPPORTED_100baseT_Full |
12134 SUPPORTED_10baseT_Half |
12135 SUPPORTED_10baseT_Full |
12137 cmd->base.port = PORT_TP;
12139 supported |= SUPPORTED_FIBRE;
12140 cmd->base.port = PORT_FIBRE;
12142 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12145 advertising = tp->link_config.advertising;
12146 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12147 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12148 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12149 advertising |= ADVERTISED_Pause;
12151 advertising |= ADVERTISED_Pause |
12152 ADVERTISED_Asym_Pause;
12154 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12155 advertising |= ADVERTISED_Asym_Pause;
12158 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12161 if (netif_running(dev) && tp->link_up) {
12162 cmd->base.speed = tp->link_config.active_speed;
12163 cmd->base.duplex = tp->link_config.active_duplex;
12164 ethtool_convert_legacy_u32_to_link_mode(
12165 cmd->link_modes.lp_advertising,
12166 tp->link_config.rmt_adv);
12168 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12169 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12170 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12172 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12175 cmd->base.speed = SPEED_UNKNOWN;
12176 cmd->base.duplex = DUPLEX_UNKNOWN;
12177 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12179 cmd->base.phy_address = tp->phy_addr;
12180 cmd->base.autoneg = tp->link_config.autoneg;
12184 static int tg3_set_link_ksettings(struct net_device *dev,
12185 const struct ethtool_link_ksettings *cmd)
12187 struct tg3 *tp = netdev_priv(dev);
12188 u32 speed = cmd->base.speed;
12191 if (tg3_flag(tp, USE_PHYLIB)) {
12192 struct phy_device *phydev;
12193 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12195 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12196 return phy_ethtool_ksettings_set(phydev, cmd);
12199 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12200 cmd->base.autoneg != AUTONEG_DISABLE)
12203 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12204 cmd->base.duplex != DUPLEX_FULL &&
12205 cmd->base.duplex != DUPLEX_HALF)
12208 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12209 cmd->link_modes.advertising);
12211 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12212 u32 mask = ADVERTISED_Autoneg |
12214 ADVERTISED_Asym_Pause;
12216 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12217 mask |= ADVERTISED_1000baseT_Half |
12218 ADVERTISED_1000baseT_Full;
12220 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12221 mask |= ADVERTISED_100baseT_Half |
12222 ADVERTISED_100baseT_Full |
12223 ADVERTISED_10baseT_Half |
12224 ADVERTISED_10baseT_Full |
12227 mask |= ADVERTISED_FIBRE;
12229 if (advertising & ~mask)
12232 mask &= (ADVERTISED_1000baseT_Half |
12233 ADVERTISED_1000baseT_Full |
12234 ADVERTISED_100baseT_Half |
12235 ADVERTISED_100baseT_Full |
12236 ADVERTISED_10baseT_Half |
12237 ADVERTISED_10baseT_Full);
12239 advertising &= mask;
12241 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12242 if (speed != SPEED_1000)
12245 if (cmd->base.duplex != DUPLEX_FULL)
12248 if (speed != SPEED_100 &&
12254 tg3_full_lock(tp, 0);
12256 tp->link_config.autoneg = cmd->base.autoneg;
12257 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12258 tp->link_config.advertising = (advertising |
12259 ADVERTISED_Autoneg);
12260 tp->link_config.speed = SPEED_UNKNOWN;
12261 tp->link_config.duplex = DUPLEX_UNKNOWN;
12263 tp->link_config.advertising = 0;
12264 tp->link_config.speed = speed;
12265 tp->link_config.duplex = cmd->base.duplex;
12268 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12270 tg3_warn_mgmt_link_flap(tp);
12272 if (netif_running(dev))
12273 tg3_setup_phy(tp, true);
12275 tg3_full_unlock(tp);
12280 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12282 struct tg3 *tp = netdev_priv(dev);
12284 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12285 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12286 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12287 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12290 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12292 struct tg3 *tp = netdev_priv(dev);
12294 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12295 wol->supported = WAKE_MAGIC;
12297 wol->supported = 0;
12299 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12300 wol->wolopts = WAKE_MAGIC;
12301 memset(&wol->sopass, 0, sizeof(wol->sopass));
12304 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12306 struct tg3 *tp = netdev_priv(dev);
12307 struct device *dp = &tp->pdev->dev;
12309 if (wol->wolopts & ~WAKE_MAGIC)
12311 if ((wol->wolopts & WAKE_MAGIC) &&
12312 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12315 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12317 if (device_may_wakeup(dp))
12318 tg3_flag_set(tp, WOL_ENABLE);
12320 tg3_flag_clear(tp, WOL_ENABLE);
12325 static u32 tg3_get_msglevel(struct net_device *dev)
12327 struct tg3 *tp = netdev_priv(dev);
12328 return tp->msg_enable;
12331 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12333 struct tg3 *tp = netdev_priv(dev);
12334 tp->msg_enable = value;
12337 static int tg3_nway_reset(struct net_device *dev)
12339 struct tg3 *tp = netdev_priv(dev);
12342 if (!netif_running(dev))
12345 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12348 tg3_warn_mgmt_link_flap(tp);
12350 if (tg3_flag(tp, USE_PHYLIB)) {
12351 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12353 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12357 spin_lock_bh(&tp->lock);
12359 tg3_readphy(tp, MII_BMCR, &bmcr);
12360 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12361 ((bmcr & BMCR_ANENABLE) ||
12362 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12363 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12367 spin_unlock_bh(&tp->lock);
12373 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12375 struct tg3 *tp = netdev_priv(dev);
12377 ering->rx_max_pending = tp->rx_std_ring_mask;
12378 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12379 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12381 ering->rx_jumbo_max_pending = 0;
12383 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12385 ering->rx_pending = tp->rx_pending;
12386 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12387 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12389 ering->rx_jumbo_pending = 0;
12391 ering->tx_pending = tp->napi[0].tx_pending;
12394 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12396 struct tg3 *tp = netdev_priv(dev);
12397 int i, irq_sync = 0, err = 0;
12398 bool reset_phy = false;
12400 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12401 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12402 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12403 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12404 (tg3_flag(tp, TSO_BUG) &&
12405 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12408 if (netif_running(dev)) {
12410 tg3_netif_stop(tp);
12414 tg3_full_lock(tp, irq_sync);
12416 tp->rx_pending = ering->rx_pending;
12418 if (tg3_flag(tp, MAX_RXPEND_64) &&
12419 tp->rx_pending > 63)
12420 tp->rx_pending = 63;
12422 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12423 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12425 for (i = 0; i < tp->irq_max; i++)
12426 tp->napi[i].tx_pending = ering->tx_pending;
12428 if (netif_running(dev)) {
12429 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12430 /* Reset PHY to avoid PHY lock up */
12431 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12432 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12433 tg3_asic_rev(tp) == ASIC_REV_5720)
12436 err = tg3_restart_hw(tp, reset_phy);
12438 tg3_netif_start(tp);
12441 tg3_full_unlock(tp);
12443 if (irq_sync && !err)
12449 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12451 struct tg3 *tp = netdev_priv(dev);
12453 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12455 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12456 epause->rx_pause = 1;
12458 epause->rx_pause = 0;
12460 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12461 epause->tx_pause = 1;
12463 epause->tx_pause = 0;
12466 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12468 struct tg3 *tp = netdev_priv(dev);
12470 bool reset_phy = false;
12472 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12473 tg3_warn_mgmt_link_flap(tp);
12475 if (tg3_flag(tp, USE_PHYLIB)) {
12477 struct phy_device *phydev;
12479 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12481 if (!(phydev->supported & SUPPORTED_Pause) ||
12482 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12483 (epause->rx_pause != epause->tx_pause)))
12486 tp->link_config.flowctrl = 0;
12487 if (epause->rx_pause) {
12488 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12490 if (epause->tx_pause) {
12491 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12492 newadv = ADVERTISED_Pause;
12494 newadv = ADVERTISED_Pause |
12495 ADVERTISED_Asym_Pause;
12496 } else if (epause->tx_pause) {
12497 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12498 newadv = ADVERTISED_Asym_Pause;
12502 if (epause->autoneg)
12503 tg3_flag_set(tp, PAUSE_AUTONEG);
12505 tg3_flag_clear(tp, PAUSE_AUTONEG);
12507 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12508 u32 oldadv = phydev->advertising &
12509 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12510 if (oldadv != newadv) {
12511 phydev->advertising &=
12512 ~(ADVERTISED_Pause |
12513 ADVERTISED_Asym_Pause);
12514 phydev->advertising |= newadv;
12515 if (phydev->autoneg) {
12517 * Always renegotiate the link to
12518 * inform our link partner of our
12519 * flow control settings, even if the
12520 * flow control is forced. Let
12521 * tg3_adjust_link() do the final
12522 * flow control setup.
12524 return phy_start_aneg(phydev);
12528 if (!epause->autoneg)
12529 tg3_setup_flow_control(tp, 0, 0);
12531 tp->link_config.advertising &=
12532 ~(ADVERTISED_Pause |
12533 ADVERTISED_Asym_Pause);
12534 tp->link_config.advertising |= newadv;
12539 if (netif_running(dev)) {
12540 tg3_netif_stop(tp);
12544 tg3_full_lock(tp, irq_sync);
12546 if (epause->autoneg)
12547 tg3_flag_set(tp, PAUSE_AUTONEG);
12549 tg3_flag_clear(tp, PAUSE_AUTONEG);
12550 if (epause->rx_pause)
12551 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12553 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12554 if (epause->tx_pause)
12555 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12557 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12559 if (netif_running(dev)) {
12560 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12561 /* Reset PHY to avoid PHY lock up */
12562 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12563 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12564 tg3_asic_rev(tp) == ASIC_REV_5720)
12567 err = tg3_restart_hw(tp, reset_phy);
12569 tg3_netif_start(tp);
12572 tg3_full_unlock(tp);
12575 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12580 static int tg3_get_sset_count(struct net_device *dev, int sset)
12584 return TG3_NUM_TEST;
12586 return TG3_NUM_STATS;
12588 return -EOPNOTSUPP;
12592 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12593 u32 *rules __always_unused)
12595 struct tg3 *tp = netdev_priv(dev);
12597 if (!tg3_flag(tp, SUPPORT_MSIX))
12598 return -EOPNOTSUPP;
12600 switch (info->cmd) {
12601 case ETHTOOL_GRXRINGS:
12602 if (netif_running(tp->dev))
12603 info->data = tp->rxq_cnt;
12605 info->data = num_online_cpus();
12606 if (info->data > TG3_RSS_MAX_NUM_QS)
12607 info->data = TG3_RSS_MAX_NUM_QS;
12613 return -EOPNOTSUPP;
12617 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12620 struct tg3 *tp = netdev_priv(dev);
12622 if (tg3_flag(tp, SUPPORT_MSIX))
12623 size = TG3_RSS_INDIR_TBL_SIZE;
12628 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12630 struct tg3 *tp = netdev_priv(dev);
12634 *hfunc = ETH_RSS_HASH_TOP;
12638 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12639 indir[i] = tp->rss_ind_tbl[i];
12644 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12647 struct tg3 *tp = netdev_priv(dev);
12650 /* We require at least one supported parameter to be changed and no
12651 * change in any of the unsupported parameters
12654 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12655 return -EOPNOTSUPP;
12660 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12661 tp->rss_ind_tbl[i] = indir[i];
12663 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12666 /* It is legal to write the indirection
12667 * table while the device is running.
12669 tg3_full_lock(tp, 0);
12670 tg3_rss_write_indir_tbl(tp);
12671 tg3_full_unlock(tp);
12676 static void tg3_get_channels(struct net_device *dev,
12677 struct ethtool_channels *channel)
12679 struct tg3 *tp = netdev_priv(dev);
12680 u32 deflt_qs = netif_get_num_default_rss_queues();
12682 channel->max_rx = tp->rxq_max;
12683 channel->max_tx = tp->txq_max;
12685 if (netif_running(dev)) {
12686 channel->rx_count = tp->rxq_cnt;
12687 channel->tx_count = tp->txq_cnt;
12690 channel->rx_count = tp->rxq_req;
12692 channel->rx_count = min(deflt_qs, tp->rxq_max);
12695 channel->tx_count = tp->txq_req;
12697 channel->tx_count = min(deflt_qs, tp->txq_max);
12701 static int tg3_set_channels(struct net_device *dev,
12702 struct ethtool_channels *channel)
12704 struct tg3 *tp = netdev_priv(dev);
12706 if (!tg3_flag(tp, SUPPORT_MSIX))
12707 return -EOPNOTSUPP;
12709 if (channel->rx_count > tp->rxq_max ||
12710 channel->tx_count > tp->txq_max)
12713 tp->rxq_req = channel->rx_count;
12714 tp->txq_req = channel->tx_count;
12716 if (!netif_running(dev))
12721 tg3_carrier_off(tp);
12723 tg3_start(tp, true, false, false);
12728 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12730 switch (stringset) {
12732 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12735 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12738 WARN_ON(1); /* we need a WARN() */
12743 static int tg3_set_phys_id(struct net_device *dev,
12744 enum ethtool_phys_id_state state)
12746 struct tg3 *tp = netdev_priv(dev);
12748 if (!netif_running(tp->dev))
12752 case ETHTOOL_ID_ACTIVE:
12753 return 1; /* cycle on/off once per second */
12755 case ETHTOOL_ID_ON:
12756 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12757 LED_CTRL_1000MBPS_ON |
12758 LED_CTRL_100MBPS_ON |
12759 LED_CTRL_10MBPS_ON |
12760 LED_CTRL_TRAFFIC_OVERRIDE |
12761 LED_CTRL_TRAFFIC_BLINK |
12762 LED_CTRL_TRAFFIC_LED);
12765 case ETHTOOL_ID_OFF:
12766 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12767 LED_CTRL_TRAFFIC_OVERRIDE);
12770 case ETHTOOL_ID_INACTIVE:
12771 tw32(MAC_LED_CTRL, tp->led_ctrl);
12778 static void tg3_get_ethtool_stats(struct net_device *dev,
12779 struct ethtool_stats *estats, u64 *tmp_stats)
12781 struct tg3 *tp = netdev_priv(dev);
12784 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12786 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12789 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12793 u32 offset = 0, len = 0;
12796 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12799 if (magic == TG3_EEPROM_MAGIC) {
12800 for (offset = TG3_NVM_DIR_START;
12801 offset < TG3_NVM_DIR_END;
12802 offset += TG3_NVM_DIRENT_SIZE) {
12803 if (tg3_nvram_read(tp, offset, &val))
12806 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12807 TG3_NVM_DIRTYPE_EXTVPD)
12811 if (offset != TG3_NVM_DIR_END) {
12812 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12813 if (tg3_nvram_read(tp, offset + 4, &offset))
12816 offset = tg3_nvram_logical_addr(tp, offset);
12820 if (!offset || !len) {
12821 offset = TG3_NVM_VPD_OFF;
12822 len = TG3_NVM_VPD_LEN;
12825 buf = kmalloc(len, GFP_KERNEL);
12829 if (magic == TG3_EEPROM_MAGIC) {
12830 for (i = 0; i < len; i += 4) {
12831 /* The data is in little-endian format in NVRAM.
12832 * Use the big-endian read routines to preserve
12833 * the byte order as it exists in NVRAM.
12835 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12841 unsigned int pos = 0;
12843 ptr = (u8 *)&buf[0];
12844 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12845 cnt = pci_read_vpd(tp->pdev, pos,
12847 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12865 #define NVRAM_TEST_SIZE 0x100
12866 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12867 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12868 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12869 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12870 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12871 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12872 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12873 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12875 static int tg3_test_nvram(struct tg3 *tp)
12877 u32 csum, magic, len;
12879 int i, j, k, err = 0, size;
12881 if (tg3_flag(tp, NO_NVRAM))
12884 if (tg3_nvram_read(tp, 0, &magic) != 0)
12887 if (magic == TG3_EEPROM_MAGIC)
12888 size = NVRAM_TEST_SIZE;
12889 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12890 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12891 TG3_EEPROM_SB_FORMAT_1) {
12892 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12893 case TG3_EEPROM_SB_REVISION_0:
12894 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12896 case TG3_EEPROM_SB_REVISION_2:
12897 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12899 case TG3_EEPROM_SB_REVISION_3:
12900 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12902 case TG3_EEPROM_SB_REVISION_4:
12903 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12905 case TG3_EEPROM_SB_REVISION_5:
12906 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12908 case TG3_EEPROM_SB_REVISION_6:
12909 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12916 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12917 size = NVRAM_SELFBOOT_HW_SIZE;
12921 buf = kmalloc(size, GFP_KERNEL);
12926 for (i = 0, j = 0; i < size; i += 4, j++) {
12927 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12934 /* Selfboot format */
12935 magic = be32_to_cpu(buf[0]);
12936 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12937 TG3_EEPROM_MAGIC_FW) {
12938 u8 *buf8 = (u8 *) buf, csum8 = 0;
12940 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12941 TG3_EEPROM_SB_REVISION_2) {
12942 /* For rev 2, the csum doesn't include the MBA. */
12943 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12945 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12948 for (i = 0; i < size; i++)
12961 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12962 TG3_EEPROM_MAGIC_HW) {
12963 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12964 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12965 u8 *buf8 = (u8 *) buf;
12967 /* Separate the parity bits and the data bytes. */
12968 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12969 if ((i == 0) || (i == 8)) {
12973 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12974 parity[k++] = buf8[i] & msk;
12976 } else if (i == 16) {
12980 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12981 parity[k++] = buf8[i] & msk;
12984 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12985 parity[k++] = buf8[i] & msk;
12988 data[j++] = buf8[i];
12992 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12993 u8 hw8 = hweight8(data[i]);
12995 if ((hw8 & 0x1) && parity[i])
12997 else if (!(hw8 & 0x1) && !parity[i])
13006 /* Bootstrap checksum at offset 0x10 */
13007 csum = calc_crc((unsigned char *) buf, 0x10);
13008 if (csum != le32_to_cpu(buf[0x10/4]))
13011 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13012 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13013 if (csum != le32_to_cpu(buf[0xfc/4]))
13018 buf = tg3_vpd_readblock(tp, &len);
13022 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13024 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13028 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13031 i += PCI_VPD_LRDT_TAG_SIZE;
13032 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13033 PCI_VPD_RO_KEYWORD_CHKSUM);
13037 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13039 for (i = 0; i <= j; i++)
13040 csum8 += ((u8 *)buf)[i];
13054 #define TG3_SERDES_TIMEOUT_SEC 2
13055 #define TG3_COPPER_TIMEOUT_SEC 6
13057 static int tg3_test_link(struct tg3 *tp)
13061 if (!netif_running(tp->dev))
13064 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13065 max = TG3_SERDES_TIMEOUT_SEC;
13067 max = TG3_COPPER_TIMEOUT_SEC;
13069 for (i = 0; i < max; i++) {
13073 if (msleep_interruptible(1000))
13080 /* Only test the commonly used registers */
13081 static int tg3_test_registers(struct tg3 *tp)
13083 int i, is_5705, is_5750;
13084 u32 offset, read_mask, write_mask, val, save_val, read_val;
13088 #define TG3_FL_5705 0x1
13089 #define TG3_FL_NOT_5705 0x2
13090 #define TG3_FL_NOT_5788 0x4
13091 #define TG3_FL_NOT_5750 0x8
13095 /* MAC Control Registers */
13096 { MAC_MODE, TG3_FL_NOT_5705,
13097 0x00000000, 0x00ef6f8c },
13098 { MAC_MODE, TG3_FL_5705,
13099 0x00000000, 0x01ef6b8c },
13100 { MAC_STATUS, TG3_FL_NOT_5705,
13101 0x03800107, 0x00000000 },
13102 { MAC_STATUS, TG3_FL_5705,
13103 0x03800100, 0x00000000 },
13104 { MAC_ADDR_0_HIGH, 0x0000,
13105 0x00000000, 0x0000ffff },
13106 { MAC_ADDR_0_LOW, 0x0000,
13107 0x00000000, 0xffffffff },
13108 { MAC_RX_MTU_SIZE, 0x0000,
13109 0x00000000, 0x0000ffff },
13110 { MAC_TX_MODE, 0x0000,
13111 0x00000000, 0x00000070 },
13112 { MAC_TX_LENGTHS, 0x0000,
13113 0x00000000, 0x00003fff },
13114 { MAC_RX_MODE, TG3_FL_NOT_5705,
13115 0x00000000, 0x000007fc },
13116 { MAC_RX_MODE, TG3_FL_5705,
13117 0x00000000, 0x000007dc },
13118 { MAC_HASH_REG_0, 0x0000,
13119 0x00000000, 0xffffffff },
13120 { MAC_HASH_REG_1, 0x0000,
13121 0x00000000, 0xffffffff },
13122 { MAC_HASH_REG_2, 0x0000,
13123 0x00000000, 0xffffffff },
13124 { MAC_HASH_REG_3, 0x0000,
13125 0x00000000, 0xffffffff },
13127 /* Receive Data and Receive BD Initiator Control Registers. */
13128 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13129 0x00000000, 0xffffffff },
13130 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13131 0x00000000, 0xffffffff },
13132 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13133 0x00000000, 0x00000003 },
13134 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13135 0x00000000, 0xffffffff },
13136 { RCVDBDI_STD_BD+0, 0x0000,
13137 0x00000000, 0xffffffff },
13138 { RCVDBDI_STD_BD+4, 0x0000,
13139 0x00000000, 0xffffffff },
13140 { RCVDBDI_STD_BD+8, 0x0000,
13141 0x00000000, 0xffff0002 },
13142 { RCVDBDI_STD_BD+0xc, 0x0000,
13143 0x00000000, 0xffffffff },
13145 /* Receive BD Initiator Control Registers. */
13146 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13147 0x00000000, 0xffffffff },
13148 { RCVBDI_STD_THRESH, TG3_FL_5705,
13149 0x00000000, 0x000003ff },
13150 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13153 /* Host Coalescing Control Registers. */
13154 { HOSTCC_MODE, TG3_FL_NOT_5705,
13155 0x00000000, 0x00000004 },
13156 { HOSTCC_MODE, TG3_FL_5705,
13157 0x00000000, 0x000000f6 },
13158 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13161 0x00000000, 0x000003ff },
13162 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13164 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13165 0x00000000, 0x000003ff },
13166 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13167 0x00000000, 0xffffffff },
13168 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13169 0x00000000, 0x000000ff },
13170 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13173 0x00000000, 0x000000ff },
13174 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13175 0x00000000, 0xffffffff },
13176 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13177 0x00000000, 0xffffffff },
13178 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13181 0x00000000, 0x000000ff },
13182 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13183 0x00000000, 0xffffffff },
13184 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13185 0x00000000, 0x000000ff },
13186 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13187 0x00000000, 0xffffffff },
13188 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13189 0x00000000, 0xffffffff },
13190 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13191 0x00000000, 0xffffffff },
13192 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13193 0x00000000, 0xffffffff },
13194 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13195 0x00000000, 0xffffffff },
13196 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13197 0xffffffff, 0x00000000 },
13198 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13199 0xffffffff, 0x00000000 },
13201 /* Buffer Manager Control Registers. */
13202 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13203 0x00000000, 0x007fff80 },
13204 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13205 0x00000000, 0x007fffff },
13206 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13207 0x00000000, 0x0000003f },
13208 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13209 0x00000000, 0x000001ff },
13210 { BUFMGR_MB_HIGH_WATER, 0x0000,
13211 0x00000000, 0x000001ff },
13212 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13213 0xffffffff, 0x00000000 },
13214 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13215 0xffffffff, 0x00000000 },
13217 /* Mailbox Registers */
13218 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13219 0x00000000, 0x000001ff },
13220 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13221 0x00000000, 0x000001ff },
13222 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13223 0x00000000, 0x000007ff },
13224 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13225 0x00000000, 0x000001ff },
13227 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13230 is_5705 = is_5750 = 0;
13231 if (tg3_flag(tp, 5705_PLUS)) {
13233 if (tg3_flag(tp, 5750_PLUS))
13237 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13238 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13241 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13244 if (tg3_flag(tp, IS_5788) &&
13245 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13248 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13251 offset = (u32) reg_tbl[i].offset;
13252 read_mask = reg_tbl[i].read_mask;
13253 write_mask = reg_tbl[i].write_mask;
13255 /* Save the original register content */
13256 save_val = tr32(offset);
13258 /* Determine the read-only value. */
13259 read_val = save_val & read_mask;
13261 /* Write zero to the register, then make sure the read-only bits
13262 * are not changed and the read/write bits are all zeros.
13266 val = tr32(offset);
13268 /* Test the read-only and read/write bits. */
13269 if (((val & read_mask) != read_val) || (val & write_mask))
13272 /* Write ones to all the bits defined by RdMask and WrMask, then
13273 * make sure the read-only bits are not changed and the
13274 * read/write bits are all ones.
13276 tw32(offset, read_mask | write_mask);
13278 val = tr32(offset);
13280 /* Test the read-only bits. */
13281 if ((val & read_mask) != read_val)
13284 /* Test the read/write bits. */
13285 if ((val & write_mask) != write_mask)
13288 tw32(offset, save_val);
13294 if (netif_msg_hw(tp))
13295 netdev_err(tp->dev,
13296 "Register test failed at offset %x\n", offset);
13297 tw32(offset, save_val);
13301 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13303 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13307 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13308 for (j = 0; j < len; j += 4) {
13311 tg3_write_mem(tp, offset + j, test_pattern[i]);
13312 tg3_read_mem(tp, offset + j, &val);
13313 if (val != test_pattern[i])
13320 static int tg3_test_memory(struct tg3 *tp)
13322 static struct mem_entry {
13325 } mem_tbl_570x[] = {
13326 { 0x00000000, 0x00b50},
13327 { 0x00002000, 0x1c000},
13328 { 0xffffffff, 0x00000}
13329 }, mem_tbl_5705[] = {
13330 { 0x00000100, 0x0000c},
13331 { 0x00000200, 0x00008},
13332 { 0x00004000, 0x00800},
13333 { 0x00006000, 0x01000},
13334 { 0x00008000, 0x02000},
13335 { 0x00010000, 0x0e000},
13336 { 0xffffffff, 0x00000}
13337 }, mem_tbl_5755[] = {
13338 { 0x00000200, 0x00008},
13339 { 0x00004000, 0x00800},
13340 { 0x00006000, 0x00800},
13341 { 0x00008000, 0x02000},
13342 { 0x00010000, 0x0c000},
13343 { 0xffffffff, 0x00000}
13344 }, mem_tbl_5906[] = {
13345 { 0x00000200, 0x00008},
13346 { 0x00004000, 0x00400},
13347 { 0x00006000, 0x00400},
13348 { 0x00008000, 0x01000},
13349 { 0x00010000, 0x01000},
13350 { 0xffffffff, 0x00000}
13351 }, mem_tbl_5717[] = {
13352 { 0x00000200, 0x00008},
13353 { 0x00010000, 0x0a000},
13354 { 0x00020000, 0x13c00},
13355 { 0xffffffff, 0x00000}
13356 }, mem_tbl_57765[] = {
13357 { 0x00000200, 0x00008},
13358 { 0x00004000, 0x00800},
13359 { 0x00006000, 0x09800},
13360 { 0x00010000, 0x0a000},
13361 { 0xffffffff, 0x00000}
13363 struct mem_entry *mem_tbl;
13367 if (tg3_flag(tp, 5717_PLUS))
13368 mem_tbl = mem_tbl_5717;
13369 else if (tg3_flag(tp, 57765_CLASS) ||
13370 tg3_asic_rev(tp) == ASIC_REV_5762)
13371 mem_tbl = mem_tbl_57765;
13372 else if (tg3_flag(tp, 5755_PLUS))
13373 mem_tbl = mem_tbl_5755;
13374 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13375 mem_tbl = mem_tbl_5906;
13376 else if (tg3_flag(tp, 5705_PLUS))
13377 mem_tbl = mem_tbl_5705;
13379 mem_tbl = mem_tbl_570x;
13381 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13382 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13390 #define TG3_TSO_MSS 500
13392 #define TG3_TSO_IP_HDR_LEN 20
13393 #define TG3_TSO_TCP_HDR_LEN 20
13394 #define TG3_TSO_TCP_OPT_LEN 12
13396 static const u8 tg3_tso_header[] = {
13398 0x45, 0x00, 0x00, 0x00,
13399 0x00, 0x00, 0x40, 0x00,
13400 0x40, 0x06, 0x00, 0x00,
13401 0x0a, 0x00, 0x00, 0x01,
13402 0x0a, 0x00, 0x00, 0x02,
13403 0x0d, 0x00, 0xe0, 0x00,
13404 0x00, 0x00, 0x01, 0x00,
13405 0x00, 0x00, 0x02, 0x00,
13406 0x80, 0x10, 0x10, 0x00,
13407 0x14, 0x09, 0x00, 0x00,
13408 0x01, 0x01, 0x08, 0x0a,
13409 0x11, 0x11, 0x11, 0x11,
13410 0x11, 0x11, 0x11, 0x11,
13413 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13415 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13416 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13418 struct sk_buff *skb;
13419 u8 *tx_data, *rx_data;
13421 int num_pkts, tx_len, rx_len, i, err;
13422 struct tg3_rx_buffer_desc *desc;
13423 struct tg3_napi *tnapi, *rnapi;
13424 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13426 tnapi = &tp->napi[0];
13427 rnapi = &tp->napi[0];
13428 if (tp->irq_cnt > 1) {
13429 if (tg3_flag(tp, ENABLE_RSS))
13430 rnapi = &tp->napi[1];
13431 if (tg3_flag(tp, ENABLE_TSS))
13432 tnapi = &tp->napi[1];
13434 coal_now = tnapi->coal_now | rnapi->coal_now;
13439 skb = netdev_alloc_skb(tp->dev, tx_len);
13443 tx_data = skb_put(skb, tx_len);
13444 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13445 memset(tx_data + ETH_ALEN, 0x0, 8);
13447 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13449 if (tso_loopback) {
13450 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13452 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13453 TG3_TSO_TCP_OPT_LEN;
13455 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13456 sizeof(tg3_tso_header));
13459 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13460 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13462 /* Set the total length field in the IP header */
13463 iph->tot_len = htons((u16)(mss + hdr_len));
13465 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13466 TXD_FLAG_CPU_POST_DMA);
13468 if (tg3_flag(tp, HW_TSO_1) ||
13469 tg3_flag(tp, HW_TSO_2) ||
13470 tg3_flag(tp, HW_TSO_3)) {
13472 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13473 th = (struct tcphdr *)&tx_data[val];
13476 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13478 if (tg3_flag(tp, HW_TSO_3)) {
13479 mss |= (hdr_len & 0xc) << 12;
13480 if (hdr_len & 0x10)
13481 base_flags |= 0x00000010;
13482 base_flags |= (hdr_len & 0x3e0) << 5;
13483 } else if (tg3_flag(tp, HW_TSO_2))
13484 mss |= hdr_len << 9;
13485 else if (tg3_flag(tp, HW_TSO_1) ||
13486 tg3_asic_rev(tp) == ASIC_REV_5705) {
13487 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13489 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13492 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13495 data_off = ETH_HLEN;
13497 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13498 tx_len > VLAN_ETH_FRAME_LEN)
13499 base_flags |= TXD_FLAG_JMB_PKT;
13502 for (i = data_off; i < tx_len; i++)
13503 tx_data[i] = (u8) (i & 0xff);
13505 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13506 if (pci_dma_mapping_error(tp->pdev, map)) {
13507 dev_kfree_skb(skb);
13511 val = tnapi->tx_prod;
13512 tnapi->tx_buffers[val].skb = skb;
13513 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13515 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13520 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13522 budget = tg3_tx_avail(tnapi);
13523 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13524 base_flags | TXD_FLAG_END, mss, 0)) {
13525 tnapi->tx_buffers[val].skb = NULL;
13526 dev_kfree_skb(skb);
13532 /* Sync BD data before updating mailbox */
13535 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13536 tr32_mailbox(tnapi->prodmbox);
13540 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13541 for (i = 0; i < 35; i++) {
13542 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13547 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13548 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13549 if ((tx_idx == tnapi->tx_prod) &&
13550 (rx_idx == (rx_start_idx + num_pkts)))
13554 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13555 dev_kfree_skb(skb);
13557 if (tx_idx != tnapi->tx_prod)
13560 if (rx_idx != rx_start_idx + num_pkts)
13564 while (rx_idx != rx_start_idx) {
13565 desc = &rnapi->rx_rcb[rx_start_idx++];
13566 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13567 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13569 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13570 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13573 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13576 if (!tso_loopback) {
13577 if (rx_len != tx_len)
13580 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13581 if (opaque_key != RXD_OPAQUE_RING_STD)
13584 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13587 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13588 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13589 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13593 if (opaque_key == RXD_OPAQUE_RING_STD) {
13594 rx_data = tpr->rx_std_buffers[desc_idx].data;
13595 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13597 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13598 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13599 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13604 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13605 PCI_DMA_FROMDEVICE);
13607 rx_data += TG3_RX_OFFSET(tp);
13608 for (i = data_off; i < rx_len; i++, val++) {
13609 if (*(rx_data + i) != (u8) (val & 0xff))
13616 /* tg3_free_rings will unmap and free the rx_data */
13621 #define TG3_STD_LOOPBACK_FAILED 1
13622 #define TG3_JMB_LOOPBACK_FAILED 2
13623 #define TG3_TSO_LOOPBACK_FAILED 4
13624 #define TG3_LOOPBACK_FAILED \
13625 (TG3_STD_LOOPBACK_FAILED | \
13626 TG3_JMB_LOOPBACK_FAILED | \
13627 TG3_TSO_LOOPBACK_FAILED)
13629 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13633 u32 jmb_pkt_sz = 9000;
13636 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13638 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13639 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13641 if (!netif_running(tp->dev)) {
13642 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13643 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649 err = tg3_reset_hw(tp, true);
13651 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13652 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13658 if (tg3_flag(tp, ENABLE_RSS)) {
13661 /* Reroute all rx packets to the 1st queue */
13662 for (i = MAC_RSS_INDIR_TBL_0;
13663 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13667 /* HW errata - mac loopback fails in some cases on 5780.
13668 * Normal traffic and PHY loopback are not affected by
13669 * errata. Also, the MAC loopback test is deprecated for
13670 * all newer ASIC revisions.
13672 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13673 !tg3_flag(tp, CPMU_PRESENT)) {
13674 tg3_mac_loopback(tp, true);
13676 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13677 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13679 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13680 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13681 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13683 tg3_mac_loopback(tp, false);
13686 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13687 !tg3_flag(tp, USE_PHYLIB)) {
13690 tg3_phy_lpbk_set(tp, 0, false);
13692 /* Wait for link */
13693 for (i = 0; i < 100; i++) {
13694 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13699 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13700 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13701 if (tg3_flag(tp, TSO_CAPABLE) &&
13702 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13703 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13704 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13705 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13706 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13709 tg3_phy_lpbk_set(tp, 0, true);
13711 /* All link indications report up, but the hardware
13712 * isn't really ready for about 20 msec. Double it
13717 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13718 data[TG3_EXT_LOOPB_TEST] |=
13719 TG3_STD_LOOPBACK_FAILED;
13720 if (tg3_flag(tp, TSO_CAPABLE) &&
13721 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13722 data[TG3_EXT_LOOPB_TEST] |=
13723 TG3_TSO_LOOPBACK_FAILED;
13724 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13725 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13726 data[TG3_EXT_LOOPB_TEST] |=
13727 TG3_JMB_LOOPBACK_FAILED;
13730 /* Re-enable gphy autopowerdown. */
13731 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13732 tg3_phy_toggle_apd(tp, true);
13735 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13736 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13739 tp->phy_flags |= eee_cap;
13744 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13747 struct tg3 *tp = netdev_priv(dev);
13748 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13750 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13751 if (tg3_power_up(tp)) {
13752 etest->flags |= ETH_TEST_FL_FAILED;
13753 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13756 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13759 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13761 if (tg3_test_nvram(tp) != 0) {
13762 etest->flags |= ETH_TEST_FL_FAILED;
13763 data[TG3_NVRAM_TEST] = 1;
13765 if (!doextlpbk && tg3_test_link(tp)) {
13766 etest->flags |= ETH_TEST_FL_FAILED;
13767 data[TG3_LINK_TEST] = 1;
13769 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13770 int err, err2 = 0, irq_sync = 0;
13772 if (netif_running(dev)) {
13774 tg3_netif_stop(tp);
13778 tg3_full_lock(tp, irq_sync);
13779 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13780 err = tg3_nvram_lock(tp);
13781 tg3_halt_cpu(tp, RX_CPU_BASE);
13782 if (!tg3_flag(tp, 5705_PLUS))
13783 tg3_halt_cpu(tp, TX_CPU_BASE);
13785 tg3_nvram_unlock(tp);
13787 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13790 if (tg3_test_registers(tp) != 0) {
13791 etest->flags |= ETH_TEST_FL_FAILED;
13792 data[TG3_REGISTER_TEST] = 1;
13795 if (tg3_test_memory(tp) != 0) {
13796 etest->flags |= ETH_TEST_FL_FAILED;
13797 data[TG3_MEMORY_TEST] = 1;
13801 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13803 if (tg3_test_loopback(tp, data, doextlpbk))
13804 etest->flags |= ETH_TEST_FL_FAILED;
13806 tg3_full_unlock(tp);
13808 if (tg3_test_interrupt(tp) != 0) {
13809 etest->flags |= ETH_TEST_FL_FAILED;
13810 data[TG3_INTERRUPT_TEST] = 1;
13813 tg3_full_lock(tp, 0);
13815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13816 if (netif_running(dev)) {
13817 tg3_flag_set(tp, INIT_COMPLETE);
13818 err2 = tg3_restart_hw(tp, true);
13820 tg3_netif_start(tp);
13823 tg3_full_unlock(tp);
13825 if (irq_sync && !err2)
13828 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13829 tg3_power_down_prepare(tp);
13833 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13835 struct tg3 *tp = netdev_priv(dev);
13836 struct hwtstamp_config stmpconf;
13838 if (!tg3_flag(tp, PTP_CAPABLE))
13839 return -EOPNOTSUPP;
13841 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13844 if (stmpconf.flags)
13847 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13848 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13851 switch (stmpconf.rx_filter) {
13852 case HWTSTAMP_FILTER_NONE:
13855 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13857 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13859 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13861 TG3_RX_PTP_CTL_SYNC_EVNT;
13863 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13865 TG3_RX_PTP_CTL_DELAY_REQ;
13867 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13869 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13871 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13873 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13875 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13877 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13879 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13881 TG3_RX_PTP_CTL_SYNC_EVNT;
13883 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13885 TG3_RX_PTP_CTL_SYNC_EVNT;
13887 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13889 TG3_RX_PTP_CTL_SYNC_EVNT;
13891 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13892 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13893 TG3_RX_PTP_CTL_DELAY_REQ;
13895 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13896 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13897 TG3_RX_PTP_CTL_DELAY_REQ;
13899 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13900 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13901 TG3_RX_PTP_CTL_DELAY_REQ;
13907 if (netif_running(dev) && tp->rxptpctl)
13908 tw32(TG3_RX_PTP_CTL,
13909 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13911 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13912 tg3_flag_set(tp, TX_TSTAMP_EN);
13914 tg3_flag_clear(tp, TX_TSTAMP_EN);
13916 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13920 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13922 struct tg3 *tp = netdev_priv(dev);
13923 struct hwtstamp_config stmpconf;
13925 if (!tg3_flag(tp, PTP_CAPABLE))
13926 return -EOPNOTSUPP;
13928 stmpconf.flags = 0;
13929 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13930 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13932 switch (tp->rxptpctl) {
13934 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13936 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13937 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13939 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13940 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13942 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13943 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13945 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13946 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13948 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13949 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13951 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13952 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13954 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13957 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13960 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13961 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13963 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13964 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13966 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13967 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13969 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13970 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13977 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13981 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13983 struct mii_ioctl_data *data = if_mii(ifr);
13984 struct tg3 *tp = netdev_priv(dev);
13987 if (tg3_flag(tp, USE_PHYLIB)) {
13988 struct phy_device *phydev;
13989 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13991 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13992 return phy_mii_ioctl(phydev, ifr, cmd);
13997 data->phy_id = tp->phy_addr;
14000 case SIOCGMIIREG: {
14003 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14004 break; /* We have no PHY */
14006 if (!netif_running(dev))
14009 spin_lock_bh(&tp->lock);
14010 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14011 data->reg_num & 0x1f, &mii_regval);
14012 spin_unlock_bh(&tp->lock);
14014 data->val_out = mii_regval;
14020 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14021 break; /* We have no PHY */
14023 if (!netif_running(dev))
14026 spin_lock_bh(&tp->lock);
14027 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14028 data->reg_num & 0x1f, data->val_in);
14029 spin_unlock_bh(&tp->lock);
14033 case SIOCSHWTSTAMP:
14034 return tg3_hwtstamp_set(dev, ifr);
14036 case SIOCGHWTSTAMP:
14037 return tg3_hwtstamp_get(dev, ifr);
14043 return -EOPNOTSUPP;
14046 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14048 struct tg3 *tp = netdev_priv(dev);
14050 memcpy(ec, &tp->coal, sizeof(*ec));
14054 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14056 struct tg3 *tp = netdev_priv(dev);
14057 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14058 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14060 if (!tg3_flag(tp, 5705_PLUS)) {
14061 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14062 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14063 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14064 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14067 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14068 (!ec->rx_coalesce_usecs) ||
14069 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14070 (!ec->tx_coalesce_usecs) ||
14071 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14072 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14073 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14074 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14075 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14076 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14077 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14078 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14081 /* Only copy relevant parameters, ignore all others. */
14082 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14083 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14084 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14085 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14086 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14087 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14088 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14089 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14090 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14092 if (netif_running(dev)) {
14093 tg3_full_lock(tp, 0);
14094 __tg3_set_coalesce(tp, &tp->coal);
14095 tg3_full_unlock(tp);
14100 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14102 struct tg3 *tp = netdev_priv(dev);
14104 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14105 netdev_warn(tp->dev, "Board does not support EEE!\n");
14106 return -EOPNOTSUPP;
14109 if (edata->advertised != tp->eee.advertised) {
14110 netdev_warn(tp->dev,
14111 "Direct manipulation of EEE advertisement is not supported\n");
14115 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14116 netdev_warn(tp->dev,
14117 "Maximal Tx Lpi timer supported is %#x(u)\n",
14118 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14124 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14125 tg3_warn_mgmt_link_flap(tp);
14127 if (netif_running(tp->dev)) {
14128 tg3_full_lock(tp, 0);
14131 tg3_full_unlock(tp);
14137 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14139 struct tg3 *tp = netdev_priv(dev);
14141 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14142 netdev_warn(tp->dev,
14143 "Board does not support EEE!\n");
14144 return -EOPNOTSUPP;
14151 static const struct ethtool_ops tg3_ethtool_ops = {
14152 .get_drvinfo = tg3_get_drvinfo,
14153 .get_regs_len = tg3_get_regs_len,
14154 .get_regs = tg3_get_regs,
14155 .get_wol = tg3_get_wol,
14156 .set_wol = tg3_set_wol,
14157 .get_msglevel = tg3_get_msglevel,
14158 .set_msglevel = tg3_set_msglevel,
14159 .nway_reset = tg3_nway_reset,
14160 .get_link = ethtool_op_get_link,
14161 .get_eeprom_len = tg3_get_eeprom_len,
14162 .get_eeprom = tg3_get_eeprom,
14163 .set_eeprom = tg3_set_eeprom,
14164 .get_ringparam = tg3_get_ringparam,
14165 .set_ringparam = tg3_set_ringparam,
14166 .get_pauseparam = tg3_get_pauseparam,
14167 .set_pauseparam = tg3_set_pauseparam,
14168 .self_test = tg3_self_test,
14169 .get_strings = tg3_get_strings,
14170 .set_phys_id = tg3_set_phys_id,
14171 .get_ethtool_stats = tg3_get_ethtool_stats,
14172 .get_coalesce = tg3_get_coalesce,
14173 .set_coalesce = tg3_set_coalesce,
14174 .get_sset_count = tg3_get_sset_count,
14175 .get_rxnfc = tg3_get_rxnfc,
14176 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14177 .get_rxfh = tg3_get_rxfh,
14178 .set_rxfh = tg3_set_rxfh,
14179 .get_channels = tg3_get_channels,
14180 .set_channels = tg3_set_channels,
14181 .get_ts_info = tg3_get_ts_info,
14182 .get_eee = tg3_get_eee,
14183 .set_eee = tg3_set_eee,
14184 .get_link_ksettings = tg3_get_link_ksettings,
14185 .set_link_ksettings = tg3_set_link_ksettings,
14188 static void tg3_get_stats64(struct net_device *dev,
14189 struct rtnl_link_stats64 *stats)
14191 struct tg3 *tp = netdev_priv(dev);
14193 spin_lock_bh(&tp->lock);
14194 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14195 *stats = tp->net_stats_prev;
14196 spin_unlock_bh(&tp->lock);
14200 tg3_get_nstats(tp, stats);
14201 spin_unlock_bh(&tp->lock);
14204 static void tg3_set_rx_mode(struct net_device *dev)
14206 struct tg3 *tp = netdev_priv(dev);
14208 if (!netif_running(dev))
14211 tg3_full_lock(tp, 0);
14212 __tg3_set_rx_mode(dev);
14213 tg3_full_unlock(tp);
14216 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14219 dev->mtu = new_mtu;
14221 if (new_mtu > ETH_DATA_LEN) {
14222 if (tg3_flag(tp, 5780_CLASS)) {
14223 netdev_update_features(dev);
14224 tg3_flag_clear(tp, TSO_CAPABLE);
14226 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14229 if (tg3_flag(tp, 5780_CLASS)) {
14230 tg3_flag_set(tp, TSO_CAPABLE);
14231 netdev_update_features(dev);
14233 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14237 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14239 struct tg3 *tp = netdev_priv(dev);
14241 bool reset_phy = false;
14243 if (!netif_running(dev)) {
14244 /* We'll just catch it later when the
14247 tg3_set_mtu(dev, tp, new_mtu);
14253 tg3_netif_stop(tp);
14255 tg3_set_mtu(dev, tp, new_mtu);
14257 tg3_full_lock(tp, 1);
14259 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14261 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14262 * breaks all requests to 256 bytes.
14264 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14265 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14266 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14267 tg3_asic_rev(tp) == ASIC_REV_5720)
14270 err = tg3_restart_hw(tp, reset_phy);
14273 tg3_netif_start(tp);
14275 tg3_full_unlock(tp);
14283 static const struct net_device_ops tg3_netdev_ops = {
14284 .ndo_open = tg3_open,
14285 .ndo_stop = tg3_close,
14286 .ndo_start_xmit = tg3_start_xmit,
14287 .ndo_get_stats64 = tg3_get_stats64,
14288 .ndo_validate_addr = eth_validate_addr,
14289 .ndo_set_rx_mode = tg3_set_rx_mode,
14290 .ndo_set_mac_address = tg3_set_mac_addr,
14291 .ndo_do_ioctl = tg3_ioctl,
14292 .ndo_tx_timeout = tg3_tx_timeout,
14293 .ndo_change_mtu = tg3_change_mtu,
14294 .ndo_fix_features = tg3_fix_features,
14295 .ndo_set_features = tg3_set_features,
14296 #ifdef CONFIG_NET_POLL_CONTROLLER
14297 .ndo_poll_controller = tg3_poll_controller,
14301 static void tg3_get_eeprom_size(struct tg3 *tp)
14303 u32 cursize, val, magic;
14305 tp->nvram_size = EEPROM_CHIP_SIZE;
14307 if (tg3_nvram_read(tp, 0, &magic) != 0)
14310 if ((magic != TG3_EEPROM_MAGIC) &&
14311 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14312 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14316 * Size the chip by reading offsets at increasing powers of two.
14317 * When we encounter our validation signature, we know the addressing
14318 * has wrapped around, and thus have our chip size.
14322 while (cursize < tp->nvram_size) {
14323 if (tg3_nvram_read(tp, cursize, &val) != 0)
14332 tp->nvram_size = cursize;
14335 static void tg3_get_nvram_size(struct tg3 *tp)
14339 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14342 /* Selfboot format */
14343 if (val != TG3_EEPROM_MAGIC) {
14344 tg3_get_eeprom_size(tp);
14348 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14350 /* This is confusing. We want to operate on the
14351 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14352 * call will read from NVRAM and byteswap the data
14353 * according to the byteswapping settings for all
14354 * other register accesses. This ensures the data we
14355 * want will always reside in the lower 16-bits.
14356 * However, the data in NVRAM is in LE format, which
14357 * means the data from the NVRAM read will always be
14358 * opposite the endianness of the CPU. The 16-bit
14359 * byteswap then brings the data to CPU endianness.
14361 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14365 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14368 static void tg3_get_nvram_info(struct tg3 *tp)
14372 nvcfg1 = tr32(NVRAM_CFG1);
14373 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14374 tg3_flag_set(tp, FLASH);
14376 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14377 tw32(NVRAM_CFG1, nvcfg1);
14380 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14381 tg3_flag(tp, 5780_CLASS)) {
14382 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14383 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14384 tp->nvram_jedecnum = JEDEC_ATMEL;
14385 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14386 tg3_flag_set(tp, NVRAM_BUFFERED);
14388 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14389 tp->nvram_jedecnum = JEDEC_ATMEL;
14390 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14392 case FLASH_VENDOR_ATMEL_EEPROM:
14393 tp->nvram_jedecnum = JEDEC_ATMEL;
14394 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14395 tg3_flag_set(tp, NVRAM_BUFFERED);
14397 case FLASH_VENDOR_ST:
14398 tp->nvram_jedecnum = JEDEC_ST;
14399 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14400 tg3_flag_set(tp, NVRAM_BUFFERED);
14402 case FLASH_VENDOR_SAIFUN:
14403 tp->nvram_jedecnum = JEDEC_SAIFUN;
14404 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14406 case FLASH_VENDOR_SST_SMALL:
14407 case FLASH_VENDOR_SST_LARGE:
14408 tp->nvram_jedecnum = JEDEC_SST;
14409 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14413 tp->nvram_jedecnum = JEDEC_ATMEL;
14414 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14415 tg3_flag_set(tp, NVRAM_BUFFERED);
14419 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14421 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14422 case FLASH_5752PAGE_SIZE_256:
14423 tp->nvram_pagesize = 256;
14425 case FLASH_5752PAGE_SIZE_512:
14426 tp->nvram_pagesize = 512;
14428 case FLASH_5752PAGE_SIZE_1K:
14429 tp->nvram_pagesize = 1024;
14431 case FLASH_5752PAGE_SIZE_2K:
14432 tp->nvram_pagesize = 2048;
14434 case FLASH_5752PAGE_SIZE_4K:
14435 tp->nvram_pagesize = 4096;
14437 case FLASH_5752PAGE_SIZE_264:
14438 tp->nvram_pagesize = 264;
14440 case FLASH_5752PAGE_SIZE_528:
14441 tp->nvram_pagesize = 528;
14446 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14450 nvcfg1 = tr32(NVRAM_CFG1);
14452 /* NVRAM protection for TPM */
14453 if (nvcfg1 & (1 << 27))
14454 tg3_flag_set(tp, PROTECTED_NVRAM);
14456 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14457 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14458 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14459 tp->nvram_jedecnum = JEDEC_ATMEL;
14460 tg3_flag_set(tp, NVRAM_BUFFERED);
14462 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14463 tp->nvram_jedecnum = JEDEC_ATMEL;
14464 tg3_flag_set(tp, NVRAM_BUFFERED);
14465 tg3_flag_set(tp, FLASH);
14467 case FLASH_5752VENDOR_ST_M45PE10:
14468 case FLASH_5752VENDOR_ST_M45PE20:
14469 case FLASH_5752VENDOR_ST_M45PE40:
14470 tp->nvram_jedecnum = JEDEC_ST;
14471 tg3_flag_set(tp, NVRAM_BUFFERED);
14472 tg3_flag_set(tp, FLASH);
14476 if (tg3_flag(tp, FLASH)) {
14477 tg3_nvram_get_pagesize(tp, nvcfg1);
14479 /* For eeprom, set pagesize to maximum eeprom size */
14480 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14482 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14483 tw32(NVRAM_CFG1, nvcfg1);
14487 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14489 u32 nvcfg1, protect = 0;
14491 nvcfg1 = tr32(NVRAM_CFG1);
14493 /* NVRAM protection for TPM */
14494 if (nvcfg1 & (1 << 27)) {
14495 tg3_flag_set(tp, PROTECTED_NVRAM);
14499 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14501 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14502 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14503 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14504 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14505 tp->nvram_jedecnum = JEDEC_ATMEL;
14506 tg3_flag_set(tp, NVRAM_BUFFERED);
14507 tg3_flag_set(tp, FLASH);
14508 tp->nvram_pagesize = 264;
14509 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14510 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14511 tp->nvram_size = (protect ? 0x3e200 :
14512 TG3_NVRAM_SIZE_512KB);
14513 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14514 tp->nvram_size = (protect ? 0x1f200 :
14515 TG3_NVRAM_SIZE_256KB);
14517 tp->nvram_size = (protect ? 0x1f200 :
14518 TG3_NVRAM_SIZE_128KB);
14520 case FLASH_5752VENDOR_ST_M45PE10:
14521 case FLASH_5752VENDOR_ST_M45PE20:
14522 case FLASH_5752VENDOR_ST_M45PE40:
14523 tp->nvram_jedecnum = JEDEC_ST;
14524 tg3_flag_set(tp, NVRAM_BUFFERED);
14525 tg3_flag_set(tp, FLASH);
14526 tp->nvram_pagesize = 256;
14527 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14528 tp->nvram_size = (protect ?
14529 TG3_NVRAM_SIZE_64KB :
14530 TG3_NVRAM_SIZE_128KB);
14531 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14532 tp->nvram_size = (protect ?
14533 TG3_NVRAM_SIZE_64KB :
14534 TG3_NVRAM_SIZE_256KB);
14536 tp->nvram_size = (protect ?
14537 TG3_NVRAM_SIZE_128KB :
14538 TG3_NVRAM_SIZE_512KB);
14543 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14547 nvcfg1 = tr32(NVRAM_CFG1);
14549 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14550 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14551 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14552 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14553 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14554 tp->nvram_jedecnum = JEDEC_ATMEL;
14555 tg3_flag_set(tp, NVRAM_BUFFERED);
14556 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14558 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14559 tw32(NVRAM_CFG1, nvcfg1);
14561 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14562 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14563 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14564 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14565 tp->nvram_jedecnum = JEDEC_ATMEL;
14566 tg3_flag_set(tp, NVRAM_BUFFERED);
14567 tg3_flag_set(tp, FLASH);
14568 tp->nvram_pagesize = 264;
14570 case FLASH_5752VENDOR_ST_M45PE10:
14571 case FLASH_5752VENDOR_ST_M45PE20:
14572 case FLASH_5752VENDOR_ST_M45PE40:
14573 tp->nvram_jedecnum = JEDEC_ST;
14574 tg3_flag_set(tp, NVRAM_BUFFERED);
14575 tg3_flag_set(tp, FLASH);
14576 tp->nvram_pagesize = 256;
14581 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14583 u32 nvcfg1, protect = 0;
14585 nvcfg1 = tr32(NVRAM_CFG1);
14587 /* NVRAM protection for TPM */
14588 if (nvcfg1 & (1 << 27)) {
14589 tg3_flag_set(tp, PROTECTED_NVRAM);
14593 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14595 case FLASH_5761VENDOR_ATMEL_ADB021D:
14596 case FLASH_5761VENDOR_ATMEL_ADB041D:
14597 case FLASH_5761VENDOR_ATMEL_ADB081D:
14598 case FLASH_5761VENDOR_ATMEL_ADB161D:
14599 case FLASH_5761VENDOR_ATMEL_MDB021D:
14600 case FLASH_5761VENDOR_ATMEL_MDB041D:
14601 case FLASH_5761VENDOR_ATMEL_MDB081D:
14602 case FLASH_5761VENDOR_ATMEL_MDB161D:
14603 tp->nvram_jedecnum = JEDEC_ATMEL;
14604 tg3_flag_set(tp, NVRAM_BUFFERED);
14605 tg3_flag_set(tp, FLASH);
14606 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14607 tp->nvram_pagesize = 256;
14609 case FLASH_5761VENDOR_ST_A_M45PE20:
14610 case FLASH_5761VENDOR_ST_A_M45PE40:
14611 case FLASH_5761VENDOR_ST_A_M45PE80:
14612 case FLASH_5761VENDOR_ST_A_M45PE16:
14613 case FLASH_5761VENDOR_ST_M_M45PE20:
14614 case FLASH_5761VENDOR_ST_M_M45PE40:
14615 case FLASH_5761VENDOR_ST_M_M45PE80:
14616 case FLASH_5761VENDOR_ST_M_M45PE16:
14617 tp->nvram_jedecnum = JEDEC_ST;
14618 tg3_flag_set(tp, NVRAM_BUFFERED);
14619 tg3_flag_set(tp, FLASH);
14620 tp->nvram_pagesize = 256;
14625 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14628 case FLASH_5761VENDOR_ATMEL_ADB161D:
14629 case FLASH_5761VENDOR_ATMEL_MDB161D:
14630 case FLASH_5761VENDOR_ST_A_M45PE16:
14631 case FLASH_5761VENDOR_ST_M_M45PE16:
14632 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14634 case FLASH_5761VENDOR_ATMEL_ADB081D:
14635 case FLASH_5761VENDOR_ATMEL_MDB081D:
14636 case FLASH_5761VENDOR_ST_A_M45PE80:
14637 case FLASH_5761VENDOR_ST_M_M45PE80:
14638 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14640 case FLASH_5761VENDOR_ATMEL_ADB041D:
14641 case FLASH_5761VENDOR_ATMEL_MDB041D:
14642 case FLASH_5761VENDOR_ST_A_M45PE40:
14643 case FLASH_5761VENDOR_ST_M_M45PE40:
14644 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14646 case FLASH_5761VENDOR_ATMEL_ADB021D:
14647 case FLASH_5761VENDOR_ATMEL_MDB021D:
14648 case FLASH_5761VENDOR_ST_A_M45PE20:
14649 case FLASH_5761VENDOR_ST_M_M45PE20:
14650 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14656 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14658 tp->nvram_jedecnum = JEDEC_ATMEL;
14659 tg3_flag_set(tp, NVRAM_BUFFERED);
14660 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14663 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14667 nvcfg1 = tr32(NVRAM_CFG1);
14669 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14670 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14671 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14672 tp->nvram_jedecnum = JEDEC_ATMEL;
14673 tg3_flag_set(tp, NVRAM_BUFFERED);
14674 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14676 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14677 tw32(NVRAM_CFG1, nvcfg1);
14679 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14680 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14681 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14682 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14683 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14684 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14685 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14686 tp->nvram_jedecnum = JEDEC_ATMEL;
14687 tg3_flag_set(tp, NVRAM_BUFFERED);
14688 tg3_flag_set(tp, FLASH);
14690 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14691 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14692 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14693 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14694 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14696 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14697 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14698 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14700 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14701 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14702 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14706 case FLASH_5752VENDOR_ST_M45PE10:
14707 case FLASH_5752VENDOR_ST_M45PE20:
14708 case FLASH_5752VENDOR_ST_M45PE40:
14709 tp->nvram_jedecnum = JEDEC_ST;
14710 tg3_flag_set(tp, NVRAM_BUFFERED);
14711 tg3_flag_set(tp, FLASH);
14713 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14714 case FLASH_5752VENDOR_ST_M45PE10:
14715 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14717 case FLASH_5752VENDOR_ST_M45PE20:
14718 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14720 case FLASH_5752VENDOR_ST_M45PE40:
14721 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14726 tg3_flag_set(tp, NO_NVRAM);
14730 tg3_nvram_get_pagesize(tp, nvcfg1);
14731 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14732 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14736 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14740 nvcfg1 = tr32(NVRAM_CFG1);
14742 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14743 case FLASH_5717VENDOR_ATMEL_EEPROM:
14744 case FLASH_5717VENDOR_MICRO_EEPROM:
14745 tp->nvram_jedecnum = JEDEC_ATMEL;
14746 tg3_flag_set(tp, NVRAM_BUFFERED);
14747 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14749 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14750 tw32(NVRAM_CFG1, nvcfg1);
14752 case FLASH_5717VENDOR_ATMEL_MDB011D:
14753 case FLASH_5717VENDOR_ATMEL_ADB011B:
14754 case FLASH_5717VENDOR_ATMEL_ADB011D:
14755 case FLASH_5717VENDOR_ATMEL_MDB021D:
14756 case FLASH_5717VENDOR_ATMEL_ADB021B:
14757 case FLASH_5717VENDOR_ATMEL_ADB021D:
14758 case FLASH_5717VENDOR_ATMEL_45USPT:
14759 tp->nvram_jedecnum = JEDEC_ATMEL;
14760 tg3_flag_set(tp, NVRAM_BUFFERED);
14761 tg3_flag_set(tp, FLASH);
14763 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14764 case FLASH_5717VENDOR_ATMEL_MDB021D:
14765 /* Detect size with tg3_nvram_get_size() */
14767 case FLASH_5717VENDOR_ATMEL_ADB021B:
14768 case FLASH_5717VENDOR_ATMEL_ADB021D:
14769 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14772 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14776 case FLASH_5717VENDOR_ST_M_M25PE10:
14777 case FLASH_5717VENDOR_ST_A_M25PE10:
14778 case FLASH_5717VENDOR_ST_M_M45PE10:
14779 case FLASH_5717VENDOR_ST_A_M45PE10:
14780 case FLASH_5717VENDOR_ST_M_M25PE20:
14781 case FLASH_5717VENDOR_ST_A_M25PE20:
14782 case FLASH_5717VENDOR_ST_M_M45PE20:
14783 case FLASH_5717VENDOR_ST_A_M45PE20:
14784 case FLASH_5717VENDOR_ST_25USPT:
14785 case FLASH_5717VENDOR_ST_45USPT:
14786 tp->nvram_jedecnum = JEDEC_ST;
14787 tg3_flag_set(tp, NVRAM_BUFFERED);
14788 tg3_flag_set(tp, FLASH);
14790 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14791 case FLASH_5717VENDOR_ST_M_M25PE20:
14792 case FLASH_5717VENDOR_ST_M_M45PE20:
14793 /* Detect size with tg3_nvram_get_size() */
14795 case FLASH_5717VENDOR_ST_A_M25PE20:
14796 case FLASH_5717VENDOR_ST_A_M45PE20:
14797 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14800 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14805 tg3_flag_set(tp, NO_NVRAM);
14809 tg3_nvram_get_pagesize(tp, nvcfg1);
14810 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14811 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14814 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14816 u32 nvcfg1, nvmpinstrp;
14818 nvcfg1 = tr32(NVRAM_CFG1);
14819 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14821 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14822 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14823 tg3_flag_set(tp, NO_NVRAM);
14827 switch (nvmpinstrp) {
14828 case FLASH_5762_EEPROM_HD:
14829 nvmpinstrp = FLASH_5720_EEPROM_HD;
14831 case FLASH_5762_EEPROM_LD:
14832 nvmpinstrp = FLASH_5720_EEPROM_LD;
14834 case FLASH_5720VENDOR_M_ST_M45PE20:
14835 /* This pinstrap supports multiple sizes, so force it
14836 * to read the actual size from location 0xf0.
14838 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14843 switch (nvmpinstrp) {
14844 case FLASH_5720_EEPROM_HD:
14845 case FLASH_5720_EEPROM_LD:
14846 tp->nvram_jedecnum = JEDEC_ATMEL;
14847 tg3_flag_set(tp, NVRAM_BUFFERED);
14849 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14850 tw32(NVRAM_CFG1, nvcfg1);
14851 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14852 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14854 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14856 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14857 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14858 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14859 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14860 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14861 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14862 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14863 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14864 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14865 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14866 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14867 case FLASH_5720VENDOR_ATMEL_45USPT:
14868 tp->nvram_jedecnum = JEDEC_ATMEL;
14869 tg3_flag_set(tp, NVRAM_BUFFERED);
14870 tg3_flag_set(tp, FLASH);
14872 switch (nvmpinstrp) {
14873 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14874 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14875 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14876 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14878 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14879 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14880 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14881 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14883 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14884 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14885 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14888 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14889 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14893 case FLASH_5720VENDOR_M_ST_M25PE10:
14894 case FLASH_5720VENDOR_M_ST_M45PE10:
14895 case FLASH_5720VENDOR_A_ST_M25PE10:
14896 case FLASH_5720VENDOR_A_ST_M45PE10:
14897 case FLASH_5720VENDOR_M_ST_M25PE20:
14898 case FLASH_5720VENDOR_M_ST_M45PE20:
14899 case FLASH_5720VENDOR_A_ST_M25PE20:
14900 case FLASH_5720VENDOR_A_ST_M45PE20:
14901 case FLASH_5720VENDOR_M_ST_M25PE40:
14902 case FLASH_5720VENDOR_M_ST_M45PE40:
14903 case FLASH_5720VENDOR_A_ST_M25PE40:
14904 case FLASH_5720VENDOR_A_ST_M45PE40:
14905 case FLASH_5720VENDOR_M_ST_M25PE80:
14906 case FLASH_5720VENDOR_M_ST_M45PE80:
14907 case FLASH_5720VENDOR_A_ST_M25PE80:
14908 case FLASH_5720VENDOR_A_ST_M45PE80:
14909 case FLASH_5720VENDOR_ST_25USPT:
14910 case FLASH_5720VENDOR_ST_45USPT:
14911 tp->nvram_jedecnum = JEDEC_ST;
14912 tg3_flag_set(tp, NVRAM_BUFFERED);
14913 tg3_flag_set(tp, FLASH);
14915 switch (nvmpinstrp) {
14916 case FLASH_5720VENDOR_M_ST_M25PE20:
14917 case FLASH_5720VENDOR_M_ST_M45PE20:
14918 case FLASH_5720VENDOR_A_ST_M25PE20:
14919 case FLASH_5720VENDOR_A_ST_M45PE20:
14920 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14922 case FLASH_5720VENDOR_M_ST_M25PE40:
14923 case FLASH_5720VENDOR_M_ST_M45PE40:
14924 case FLASH_5720VENDOR_A_ST_M25PE40:
14925 case FLASH_5720VENDOR_A_ST_M45PE40:
14926 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14928 case FLASH_5720VENDOR_M_ST_M25PE80:
14929 case FLASH_5720VENDOR_M_ST_M45PE80:
14930 case FLASH_5720VENDOR_A_ST_M25PE80:
14931 case FLASH_5720VENDOR_A_ST_M45PE80:
14932 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14935 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14936 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14941 tg3_flag_set(tp, NO_NVRAM);
14945 tg3_nvram_get_pagesize(tp, nvcfg1);
14946 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14947 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14949 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14952 if (tg3_nvram_read(tp, 0, &val))
14955 if (val != TG3_EEPROM_MAGIC &&
14956 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14957 tg3_flag_set(tp, NO_NVRAM);
14961 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14962 static void tg3_nvram_init(struct tg3 *tp)
14964 if (tg3_flag(tp, IS_SSB_CORE)) {
14965 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14966 tg3_flag_clear(tp, NVRAM);
14967 tg3_flag_clear(tp, NVRAM_BUFFERED);
14968 tg3_flag_set(tp, NO_NVRAM);
14972 tw32_f(GRC_EEPROM_ADDR,
14973 (EEPROM_ADDR_FSM_RESET |
14974 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14975 EEPROM_ADDR_CLKPERD_SHIFT)));
14979 /* Enable seeprom accesses. */
14980 tw32_f(GRC_LOCAL_CTRL,
14981 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14984 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14985 tg3_asic_rev(tp) != ASIC_REV_5701) {
14986 tg3_flag_set(tp, NVRAM);
14988 if (tg3_nvram_lock(tp)) {
14989 netdev_warn(tp->dev,
14990 "Cannot get nvram lock, %s failed\n",
14994 tg3_enable_nvram_access(tp);
14996 tp->nvram_size = 0;
14998 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14999 tg3_get_5752_nvram_info(tp);
15000 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15001 tg3_get_5755_nvram_info(tp);
15002 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15003 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15004 tg3_asic_rev(tp) == ASIC_REV_5785)
15005 tg3_get_5787_nvram_info(tp);
15006 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15007 tg3_get_5761_nvram_info(tp);
15008 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15009 tg3_get_5906_nvram_info(tp);
15010 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15011 tg3_flag(tp, 57765_CLASS))
15012 tg3_get_57780_nvram_info(tp);
15013 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15014 tg3_asic_rev(tp) == ASIC_REV_5719)
15015 tg3_get_5717_nvram_info(tp);
15016 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15017 tg3_asic_rev(tp) == ASIC_REV_5762)
15018 tg3_get_5720_nvram_info(tp);
15020 tg3_get_nvram_info(tp);
15022 if (tp->nvram_size == 0)
15023 tg3_get_nvram_size(tp);
15025 tg3_disable_nvram_access(tp);
15026 tg3_nvram_unlock(tp);
15029 tg3_flag_clear(tp, NVRAM);
15030 tg3_flag_clear(tp, NVRAM_BUFFERED);
15032 tg3_get_eeprom_size(tp);
15036 struct subsys_tbl_ent {
15037 u16 subsys_vendor, subsys_devid;
15041 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15042 /* Broadcom boards. */
15043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15044 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15045 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15046 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15047 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15048 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15049 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15050 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15051 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15052 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15053 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15054 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15055 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15056 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15057 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15058 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15059 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15060 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15061 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15062 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15063 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15064 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15067 { TG3PCI_SUBVENDOR_ID_3COM,
15068 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15069 { TG3PCI_SUBVENDOR_ID_3COM,
15070 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15071 { TG3PCI_SUBVENDOR_ID_3COM,
15072 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15073 { TG3PCI_SUBVENDOR_ID_3COM,
15074 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15075 { TG3PCI_SUBVENDOR_ID_3COM,
15076 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15079 { TG3PCI_SUBVENDOR_ID_DELL,
15080 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15081 { TG3PCI_SUBVENDOR_ID_DELL,
15082 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15083 { TG3PCI_SUBVENDOR_ID_DELL,
15084 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15085 { TG3PCI_SUBVENDOR_ID_DELL,
15086 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15088 /* Compaq boards. */
15089 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15090 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15091 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15092 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15093 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15094 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15095 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15096 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15097 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15098 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15101 { TG3PCI_SUBVENDOR_ID_IBM,
15102 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15105 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15109 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15110 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15111 tp->pdev->subsystem_vendor) &&
15112 (subsys_id_to_phy_id[i].subsys_devid ==
15113 tp->pdev->subsystem_device))
15114 return &subsys_id_to_phy_id[i];
15119 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15123 tp->phy_id = TG3_PHY_ID_INVALID;
15124 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15126 /* Assume an onboard device and WOL capable by default. */
15127 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15128 tg3_flag_set(tp, WOL_CAP);
15130 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15131 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15132 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15133 tg3_flag_set(tp, IS_NIC);
15135 val = tr32(VCPU_CFGSHDW);
15136 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15137 tg3_flag_set(tp, ASPM_WORKAROUND);
15138 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15139 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15140 tg3_flag_set(tp, WOL_ENABLE);
15141 device_set_wakeup_enable(&tp->pdev->dev, true);
15146 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15147 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15148 u32 nic_cfg, led_cfg;
15149 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15150 u32 nic_phy_id, ver, eeprom_phy_id;
15151 int eeprom_phy_serdes = 0;
15153 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15154 tp->nic_sram_data_cfg = nic_cfg;
15156 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15157 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15158 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15159 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15160 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15161 (ver > 0) && (ver < 0x100))
15162 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15164 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15165 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15167 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15168 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15169 tg3_asic_rev(tp) == ASIC_REV_5720)
15170 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15172 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15173 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15174 eeprom_phy_serdes = 1;
15176 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15177 if (nic_phy_id != 0) {
15178 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15179 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15181 eeprom_phy_id = (id1 >> 16) << 10;
15182 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15183 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15187 tp->phy_id = eeprom_phy_id;
15188 if (eeprom_phy_serdes) {
15189 if (!tg3_flag(tp, 5705_PLUS))
15190 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15192 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15195 if (tg3_flag(tp, 5750_PLUS))
15196 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15197 SHASTA_EXT_LED_MODE_MASK);
15199 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15203 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15204 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15207 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15208 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15211 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15212 tp->led_ctrl = LED_CTRL_MODE_MAC;
15214 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15215 * read on some older 5700/5701 bootcode.
15217 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15218 tg3_asic_rev(tp) == ASIC_REV_5701)
15219 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15223 case SHASTA_EXT_LED_SHARED:
15224 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15225 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15226 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15227 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15228 LED_CTRL_MODE_PHY_2);
15230 if (tg3_flag(tp, 5717_PLUS) ||
15231 tg3_asic_rev(tp) == ASIC_REV_5762)
15232 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15233 LED_CTRL_BLINK_RATE_MASK;
15237 case SHASTA_EXT_LED_MAC:
15238 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15241 case SHASTA_EXT_LED_COMBO:
15242 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15243 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15244 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15245 LED_CTRL_MODE_PHY_2);
15250 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15251 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15252 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15253 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15255 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15256 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15258 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15259 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15260 if ((tp->pdev->subsystem_vendor ==
15261 PCI_VENDOR_ID_ARIMA) &&
15262 (tp->pdev->subsystem_device == 0x205a ||
15263 tp->pdev->subsystem_device == 0x2063))
15264 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15266 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15267 tg3_flag_set(tp, IS_NIC);
15270 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15271 tg3_flag_set(tp, ENABLE_ASF);
15272 if (tg3_flag(tp, 5750_PLUS))
15273 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15276 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15277 tg3_flag(tp, 5750_PLUS))
15278 tg3_flag_set(tp, ENABLE_APE);
15280 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15281 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15282 tg3_flag_clear(tp, WOL_CAP);
15284 if (tg3_flag(tp, WOL_CAP) &&
15285 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15286 tg3_flag_set(tp, WOL_ENABLE);
15287 device_set_wakeup_enable(&tp->pdev->dev, true);
15290 if (cfg2 & (1 << 17))
15291 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15293 /* serdes signal pre-emphasis in register 0x590 set by */
15294 /* bootcode if bit 18 is set */
15295 if (cfg2 & (1 << 18))
15296 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15298 if ((tg3_flag(tp, 57765_PLUS) ||
15299 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15300 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15301 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15302 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15304 if (tg3_flag(tp, PCI_EXPRESS)) {
15307 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15308 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15309 !tg3_flag(tp, 57765_PLUS) &&
15310 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15311 tg3_flag_set(tp, ASPM_WORKAROUND);
15312 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15313 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15314 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15315 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15318 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15319 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15320 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15321 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15322 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15323 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15325 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15326 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15329 if (tg3_flag(tp, WOL_CAP))
15330 device_set_wakeup_enable(&tp->pdev->dev,
15331 tg3_flag(tp, WOL_ENABLE));
15333 device_set_wakeup_capable(&tp->pdev->dev, false);
15336 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15339 u32 val2, off = offset * 8;
15341 err = tg3_nvram_lock(tp);
15345 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15346 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15347 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15348 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15351 for (i = 0; i < 100; i++) {
15352 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15353 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15354 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15360 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15362 tg3_nvram_unlock(tp);
15363 if (val2 & APE_OTP_STATUS_CMD_DONE)
15369 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15374 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15375 tw32(OTP_CTRL, cmd);
15377 /* Wait for up to 1 ms for command to execute. */
15378 for (i = 0; i < 100; i++) {
15379 val = tr32(OTP_STATUS);
15380 if (val & OTP_STATUS_CMD_DONE)
15385 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15388 /* Read the gphy configuration from the OTP region of the chip. The gphy
15389 * configuration is a 32-bit value that straddles the alignment boundary.
15390 * We do two 32-bit reads and then shift and merge the results.
15392 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15394 u32 bhalf_otp, thalf_otp;
15396 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15398 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15401 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15403 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15406 thalf_otp = tr32(OTP_READ_DATA);
15408 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15410 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15413 bhalf_otp = tr32(OTP_READ_DATA);
15415 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15418 static void tg3_phy_init_link_config(struct tg3 *tp)
15420 u32 adv = ADVERTISED_Autoneg;
15422 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15423 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15424 adv |= ADVERTISED_1000baseT_Half;
15425 adv |= ADVERTISED_1000baseT_Full;
15428 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15429 adv |= ADVERTISED_100baseT_Half |
15430 ADVERTISED_100baseT_Full |
15431 ADVERTISED_10baseT_Half |
15432 ADVERTISED_10baseT_Full |
15435 adv |= ADVERTISED_FIBRE;
15437 tp->link_config.advertising = adv;
15438 tp->link_config.speed = SPEED_UNKNOWN;
15439 tp->link_config.duplex = DUPLEX_UNKNOWN;
15440 tp->link_config.autoneg = AUTONEG_ENABLE;
15441 tp->link_config.active_speed = SPEED_UNKNOWN;
15442 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15447 static int tg3_phy_probe(struct tg3 *tp)
15449 u32 hw_phy_id_1, hw_phy_id_2;
15450 u32 hw_phy_id, hw_phy_id_masked;
15453 /* flow control autonegotiation is default behavior */
15454 tg3_flag_set(tp, PAUSE_AUTONEG);
15455 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15457 if (tg3_flag(tp, ENABLE_APE)) {
15458 switch (tp->pci_fn) {
15460 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15463 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15466 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15469 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15474 if (!tg3_flag(tp, ENABLE_ASF) &&
15475 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15476 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15477 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15478 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15480 if (tg3_flag(tp, USE_PHYLIB))
15481 return tg3_phy_init(tp);
15483 /* Reading the PHY ID register can conflict with ASF
15484 * firmware access to the PHY hardware.
15487 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15488 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15490 /* Now read the physical PHY_ID from the chip and verify
15491 * that it is sane. If it doesn't look good, we fall back
15492 * to either the hard-coded table based PHY_ID and failing
15493 * that the value found in the eeprom area.
15495 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15496 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15498 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15499 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15500 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15502 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15505 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15506 tp->phy_id = hw_phy_id;
15507 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15508 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15510 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15512 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15513 /* Do nothing, phy ID already set up in
15514 * tg3_get_eeprom_hw_cfg().
15517 struct subsys_tbl_ent *p;
15519 /* No eeprom signature? Try the hardcoded
15520 * subsys device table.
15522 p = tg3_lookup_by_subsys(tp);
15524 tp->phy_id = p->phy_id;
15525 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15526 /* For now we saw the IDs 0xbc050cd0,
15527 * 0xbc050f80 and 0xbc050c30 on devices
15528 * connected to an BCM4785 and there are
15529 * probably more. Just assume that the phy is
15530 * supported when it is connected to a SSB core
15537 tp->phy_id == TG3_PHY_ID_BCM8002)
15538 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15542 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15543 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15544 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15545 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15546 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15547 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15548 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15549 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15550 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15551 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15553 tp->eee.supported = SUPPORTED_100baseT_Full |
15554 SUPPORTED_1000baseT_Full;
15555 tp->eee.advertised = ADVERTISED_100baseT_Full |
15556 ADVERTISED_1000baseT_Full;
15557 tp->eee.eee_enabled = 1;
15558 tp->eee.tx_lpi_enabled = 1;
15559 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15562 tg3_phy_init_link_config(tp);
15564 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15565 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15566 !tg3_flag(tp, ENABLE_APE) &&
15567 !tg3_flag(tp, ENABLE_ASF)) {
15570 tg3_readphy(tp, MII_BMSR, &bmsr);
15571 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15572 (bmsr & BMSR_LSTATUS))
15573 goto skip_phy_reset;
15575 err = tg3_phy_reset(tp);
15579 tg3_phy_set_wirespeed(tp);
15581 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15582 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15583 tp->link_config.flowctrl);
15585 tg3_writephy(tp, MII_BMCR,
15586 BMCR_ANENABLE | BMCR_ANRESTART);
15591 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15592 err = tg3_init_5401phy_dsp(tp);
15596 err = tg3_init_5401phy_dsp(tp);
15602 static void tg3_read_vpd(struct tg3 *tp)
15605 unsigned int block_end, rosize, len;
15609 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15613 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15615 goto out_not_found;
15617 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15618 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15619 i += PCI_VPD_LRDT_TAG_SIZE;
15621 if (block_end > vpdlen)
15622 goto out_not_found;
15624 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15625 PCI_VPD_RO_KEYWORD_MFR_ID);
15627 len = pci_vpd_info_field_size(&vpd_data[j]);
15629 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15630 if (j + len > block_end || len != 4 ||
15631 memcmp(&vpd_data[j], "1028", 4))
15634 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15635 PCI_VPD_RO_KEYWORD_VENDOR0);
15639 len = pci_vpd_info_field_size(&vpd_data[j]);
15641 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15642 if (j + len > block_end)
15645 if (len >= sizeof(tp->fw_ver))
15646 len = sizeof(tp->fw_ver) - 1;
15647 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15648 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15653 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654 PCI_VPD_RO_KEYWORD_PARTNO);
15656 goto out_not_found;
15658 len = pci_vpd_info_field_size(&vpd_data[i]);
15660 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15661 if (len > TG3_BPN_SIZE ||
15662 (len + i) > vpdlen)
15663 goto out_not_found;
15665 memcpy(tp->board_part_number, &vpd_data[i], len);
15669 if (tp->board_part_number[0])
15673 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15674 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15675 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15676 strcpy(tp->board_part_number, "BCM5717");
15677 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15678 strcpy(tp->board_part_number, "BCM5718");
15681 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15682 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15683 strcpy(tp->board_part_number, "BCM57780");
15684 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15685 strcpy(tp->board_part_number, "BCM57760");
15686 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15687 strcpy(tp->board_part_number, "BCM57790");
15688 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15689 strcpy(tp->board_part_number, "BCM57788");
15692 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15693 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15694 strcpy(tp->board_part_number, "BCM57761");
15695 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15696 strcpy(tp->board_part_number, "BCM57765");
15697 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15698 strcpy(tp->board_part_number, "BCM57781");
15699 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15700 strcpy(tp->board_part_number, "BCM57785");
15701 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15702 strcpy(tp->board_part_number, "BCM57791");
15703 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15704 strcpy(tp->board_part_number, "BCM57795");
15707 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15709 strcpy(tp->board_part_number, "BCM57762");
15710 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15711 strcpy(tp->board_part_number, "BCM57766");
15712 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15713 strcpy(tp->board_part_number, "BCM57782");
15714 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15715 strcpy(tp->board_part_number, "BCM57786");
15718 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15719 strcpy(tp->board_part_number, "BCM95906");
15722 strcpy(tp->board_part_number, "none");
15726 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15730 if (tg3_nvram_read(tp, offset, &val) ||
15731 (val & 0xfc000000) != 0x0c000000 ||
15732 tg3_nvram_read(tp, offset + 4, &val) ||
15739 static void tg3_read_bc_ver(struct tg3 *tp)
15741 u32 val, offset, start, ver_offset;
15743 bool newver = false;
15745 if (tg3_nvram_read(tp, 0xc, &offset) ||
15746 tg3_nvram_read(tp, 0x4, &start))
15749 offset = tg3_nvram_logical_addr(tp, offset);
15751 if (tg3_nvram_read(tp, offset, &val))
15754 if ((val & 0xfc000000) == 0x0c000000) {
15755 if (tg3_nvram_read(tp, offset + 4, &val))
15762 dst_off = strlen(tp->fw_ver);
15765 if (TG3_VER_SIZE - dst_off < 16 ||
15766 tg3_nvram_read(tp, offset + 8, &ver_offset))
15769 offset = offset + ver_offset - start;
15770 for (i = 0; i < 16; i += 4) {
15772 if (tg3_nvram_read_be32(tp, offset + i, &v))
15775 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15780 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15783 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15784 TG3_NVM_BCVER_MAJSFT;
15785 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15786 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15787 "v%d.%02d", major, minor);
15791 static void tg3_read_hwsb_ver(struct tg3 *tp)
15793 u32 val, major, minor;
15795 /* Use native endian representation */
15796 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15799 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15800 TG3_NVM_HWSB_CFG1_MAJSFT;
15801 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15802 TG3_NVM_HWSB_CFG1_MINSFT;
15804 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15807 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15809 u32 offset, major, minor, build;
15811 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15813 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15816 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15817 case TG3_EEPROM_SB_REVISION_0:
15818 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15820 case TG3_EEPROM_SB_REVISION_2:
15821 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15823 case TG3_EEPROM_SB_REVISION_3:
15824 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15826 case TG3_EEPROM_SB_REVISION_4:
15827 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15829 case TG3_EEPROM_SB_REVISION_5:
15830 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15832 case TG3_EEPROM_SB_REVISION_6:
15833 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15839 if (tg3_nvram_read(tp, offset, &val))
15842 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15843 TG3_EEPROM_SB_EDH_BLD_SHFT;
15844 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15845 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15846 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15848 if (minor > 99 || build > 26)
15851 offset = strlen(tp->fw_ver);
15852 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15853 " v%d.%02d", major, minor);
15856 offset = strlen(tp->fw_ver);
15857 if (offset < TG3_VER_SIZE - 1)
15858 tp->fw_ver[offset] = 'a' + build - 1;
15862 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15864 u32 val, offset, start;
15867 for (offset = TG3_NVM_DIR_START;
15868 offset < TG3_NVM_DIR_END;
15869 offset += TG3_NVM_DIRENT_SIZE) {
15870 if (tg3_nvram_read(tp, offset, &val))
15873 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15877 if (offset == TG3_NVM_DIR_END)
15880 if (!tg3_flag(tp, 5705_PLUS))
15881 start = 0x08000000;
15882 else if (tg3_nvram_read(tp, offset - 4, &start))
15885 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15886 !tg3_fw_img_is_valid(tp, offset) ||
15887 tg3_nvram_read(tp, offset + 8, &val))
15890 offset += val - start;
15892 vlen = strlen(tp->fw_ver);
15894 tp->fw_ver[vlen++] = ',';
15895 tp->fw_ver[vlen++] = ' ';
15897 for (i = 0; i < 4; i++) {
15899 if (tg3_nvram_read_be32(tp, offset, &v))
15902 offset += sizeof(v);
15904 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15905 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15909 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15914 static void tg3_probe_ncsi(struct tg3 *tp)
15918 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15919 if (apedata != APE_SEG_SIG_MAGIC)
15922 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15923 if (!(apedata & APE_FW_STATUS_READY))
15926 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15927 tg3_flag_set(tp, APE_HAS_NCSI);
15930 static void tg3_read_dash_ver(struct tg3 *tp)
15936 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15938 if (tg3_flag(tp, APE_HAS_NCSI))
15940 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15945 vlen = strlen(tp->fw_ver);
15947 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15949 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15950 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15951 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15952 (apedata & APE_FW_VERSION_BLDMSK));
15955 static void tg3_read_otp_ver(struct tg3 *tp)
15959 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15962 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15963 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15964 TG3_OTP_MAGIC0_VALID(val)) {
15965 u64 val64 = (u64) val << 32 | val2;
15969 for (i = 0; i < 7; i++) {
15970 if ((val64 & 0xff) == 0)
15972 ver = val64 & 0xff;
15975 vlen = strlen(tp->fw_ver);
15976 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15980 static void tg3_read_fw_ver(struct tg3 *tp)
15983 bool vpd_vers = false;
15985 if (tp->fw_ver[0] != 0)
15988 if (tg3_flag(tp, NO_NVRAM)) {
15989 strcat(tp->fw_ver, "sb");
15990 tg3_read_otp_ver(tp);
15994 if (tg3_nvram_read(tp, 0, &val))
15997 if (val == TG3_EEPROM_MAGIC)
15998 tg3_read_bc_ver(tp);
15999 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16000 tg3_read_sb_ver(tp, val);
16001 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16002 tg3_read_hwsb_ver(tp);
16004 if (tg3_flag(tp, ENABLE_ASF)) {
16005 if (tg3_flag(tp, ENABLE_APE)) {
16006 tg3_probe_ncsi(tp);
16008 tg3_read_dash_ver(tp);
16009 } else if (!vpd_vers) {
16010 tg3_read_mgmtfw_ver(tp);
16014 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16017 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16019 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16020 return TG3_RX_RET_MAX_SIZE_5717;
16021 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16022 return TG3_RX_RET_MAX_SIZE_5700;
16024 return TG3_RX_RET_MAX_SIZE_5705;
16027 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16028 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16029 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16030 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16034 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16036 struct pci_dev *peer;
16037 unsigned int func, devnr = tp->pdev->devfn & ~7;
16039 for (func = 0; func < 8; func++) {
16040 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16041 if (peer && peer != tp->pdev)
16045 /* 5704 can be configured in single-port mode, set peer to
16046 * tp->pdev in that case.
16054 * We don't need to keep the refcount elevated; there's no way
16055 * to remove one half of this device without removing the other
16062 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16064 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16065 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16068 /* All devices that use the alternate
16069 * ASIC REV location have a CPMU.
16071 tg3_flag_set(tp, CPMU_PRESENT);
16073 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16074 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16075 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16076 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16077 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16078 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16079 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16080 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16081 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16082 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16084 reg = TG3PCI_GEN2_PRODID_ASICREV;
16085 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16093 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16094 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16095 reg = TG3PCI_GEN15_PRODID_ASICREV;
16097 reg = TG3PCI_PRODID_ASICREV;
16099 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16102 /* Wrong chip ID in 5752 A0. This code can be removed later
16103 * as A0 is not in production.
16105 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16106 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16108 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16109 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16111 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16112 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16113 tg3_asic_rev(tp) == ASIC_REV_5720)
16114 tg3_flag_set(tp, 5717_PLUS);
16116 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16117 tg3_asic_rev(tp) == ASIC_REV_57766)
16118 tg3_flag_set(tp, 57765_CLASS);
16120 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16121 tg3_asic_rev(tp) == ASIC_REV_5762)
16122 tg3_flag_set(tp, 57765_PLUS);
16124 /* Intentionally exclude ASIC_REV_5906 */
16125 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16126 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16127 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16128 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16129 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16130 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16131 tg3_flag(tp, 57765_PLUS))
16132 tg3_flag_set(tp, 5755_PLUS);
16134 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16135 tg3_asic_rev(tp) == ASIC_REV_5714)
16136 tg3_flag_set(tp, 5780_CLASS);
16138 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16139 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16140 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16141 tg3_flag(tp, 5755_PLUS) ||
16142 tg3_flag(tp, 5780_CLASS))
16143 tg3_flag_set(tp, 5750_PLUS);
16145 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16146 tg3_flag(tp, 5750_PLUS))
16147 tg3_flag_set(tp, 5705_PLUS);
16150 static bool tg3_10_100_only_device(struct tg3 *tp,
16151 const struct pci_device_id *ent)
16153 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16155 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16156 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16157 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16160 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16161 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16162 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16172 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16175 u32 pci_state_reg, grc_misc_cfg;
16180 /* Force memory write invalidate off. If we leave it on,
16181 * then on 5700_BX chips we have to enable a workaround.
16182 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16183 * to match the cacheline size. The Broadcom driver have this
16184 * workaround but turns MWI off all the times so never uses
16185 * it. This seems to suggest that the workaround is insufficient.
16187 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16188 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16189 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16191 /* Important! -- Make sure register accesses are byteswapped
16192 * correctly. Also, for those chips that require it, make
16193 * sure that indirect register accesses are enabled before
16194 * the first operation.
16196 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16198 tp->misc_host_ctrl |= (misc_ctrl_reg &
16199 MISC_HOST_CTRL_CHIPREV);
16200 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16201 tp->misc_host_ctrl);
16203 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16205 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16206 * we need to disable memory and use config. cycles
16207 * only to access all registers. The 5702/03 chips
16208 * can mistakenly decode the special cycles from the
16209 * ICH chipsets as memory write cycles, causing corruption
16210 * of register and memory space. Only certain ICH bridges
16211 * will drive special cycles with non-zero data during the
16212 * address phase which can fall within the 5703's address
16213 * range. This is not an ICH bug as the PCI spec allows
16214 * non-zero address during special cycles. However, only
16215 * these ICH bridges are known to drive non-zero addresses
16216 * during special cycles.
16218 * Since special cycles do not cross PCI bridges, we only
16219 * enable this workaround if the 5703 is on the secondary
16220 * bus of these ICH bridges.
16222 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16223 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16224 static struct tg3_dev_id {
16228 } ich_chipsets[] = {
16229 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16231 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16233 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16235 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16239 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16240 struct pci_dev *bridge = NULL;
16242 while (pci_id->vendor != 0) {
16243 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16249 if (pci_id->rev != PCI_ANY_ID) {
16250 if (bridge->revision > pci_id->rev)
16253 if (bridge->subordinate &&
16254 (bridge->subordinate->number ==
16255 tp->pdev->bus->number)) {
16256 tg3_flag_set(tp, ICH_WORKAROUND);
16257 pci_dev_put(bridge);
16263 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16264 static struct tg3_dev_id {
16267 } bridge_chipsets[] = {
16268 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16269 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16272 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16273 struct pci_dev *bridge = NULL;
16275 while (pci_id->vendor != 0) {
16276 bridge = pci_get_device(pci_id->vendor,
16283 if (bridge->subordinate &&
16284 (bridge->subordinate->number <=
16285 tp->pdev->bus->number) &&
16286 (bridge->subordinate->busn_res.end >=
16287 tp->pdev->bus->number)) {
16288 tg3_flag_set(tp, 5701_DMA_BUG);
16289 pci_dev_put(bridge);
16295 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16296 * DMA addresses > 40-bit. This bridge may have other additional
16297 * 57xx devices behind it in some 4-port NIC designs for example.
16298 * Any tg3 device found behind the bridge will also need the 40-bit
16301 if (tg3_flag(tp, 5780_CLASS)) {
16302 tg3_flag_set(tp, 40BIT_DMA_BUG);
16303 tp->msi_cap = tp->pdev->msi_cap;
16305 struct pci_dev *bridge = NULL;
16308 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16309 PCI_DEVICE_ID_SERVERWORKS_EPB,
16311 if (bridge && bridge->subordinate &&
16312 (bridge->subordinate->number <=
16313 tp->pdev->bus->number) &&
16314 (bridge->subordinate->busn_res.end >=
16315 tp->pdev->bus->number)) {
16316 tg3_flag_set(tp, 40BIT_DMA_BUG);
16317 pci_dev_put(bridge);
16323 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16324 tg3_asic_rev(tp) == ASIC_REV_5714)
16325 tp->pdev_peer = tg3_find_peer(tp);
16327 /* Determine TSO capabilities */
16328 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16329 ; /* Do nothing. HW bug. */
16330 else if (tg3_flag(tp, 57765_PLUS))
16331 tg3_flag_set(tp, HW_TSO_3);
16332 else if (tg3_flag(tp, 5755_PLUS) ||
16333 tg3_asic_rev(tp) == ASIC_REV_5906)
16334 tg3_flag_set(tp, HW_TSO_2);
16335 else if (tg3_flag(tp, 5750_PLUS)) {
16336 tg3_flag_set(tp, HW_TSO_1);
16337 tg3_flag_set(tp, TSO_BUG);
16338 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16339 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16340 tg3_flag_clear(tp, TSO_BUG);
16341 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16342 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16343 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16344 tg3_flag_set(tp, FW_TSO);
16345 tg3_flag_set(tp, TSO_BUG);
16346 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16347 tp->fw_needed = FIRMWARE_TG3TSO5;
16349 tp->fw_needed = FIRMWARE_TG3TSO;
16352 /* Selectively allow TSO based on operating conditions */
16353 if (tg3_flag(tp, HW_TSO_1) ||
16354 tg3_flag(tp, HW_TSO_2) ||
16355 tg3_flag(tp, HW_TSO_3) ||
16356 tg3_flag(tp, FW_TSO)) {
16357 /* For firmware TSO, assume ASF is disabled.
16358 * We'll disable TSO later if we discover ASF
16359 * is enabled in tg3_get_eeprom_hw_cfg().
16361 tg3_flag_set(tp, TSO_CAPABLE);
16363 tg3_flag_clear(tp, TSO_CAPABLE);
16364 tg3_flag_clear(tp, TSO_BUG);
16365 tp->fw_needed = NULL;
16368 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16369 tp->fw_needed = FIRMWARE_TG3;
16371 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16372 tp->fw_needed = FIRMWARE_TG357766;
16376 if (tg3_flag(tp, 5750_PLUS)) {
16377 tg3_flag_set(tp, SUPPORT_MSI);
16378 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16379 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16380 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16381 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16382 tp->pdev_peer == tp->pdev))
16383 tg3_flag_clear(tp, SUPPORT_MSI);
16385 if (tg3_flag(tp, 5755_PLUS) ||
16386 tg3_asic_rev(tp) == ASIC_REV_5906) {
16387 tg3_flag_set(tp, 1SHOT_MSI);
16390 if (tg3_flag(tp, 57765_PLUS)) {
16391 tg3_flag_set(tp, SUPPORT_MSIX);
16392 tp->irq_max = TG3_IRQ_MAX_VECS;
16398 if (tp->irq_max > 1) {
16399 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16400 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16402 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16403 tg3_asic_rev(tp) == ASIC_REV_5720)
16404 tp->txq_max = tp->irq_max - 1;
16407 if (tg3_flag(tp, 5755_PLUS) ||
16408 tg3_asic_rev(tp) == ASIC_REV_5906)
16409 tg3_flag_set(tp, SHORT_DMA_BUG);
16411 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16412 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16414 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16415 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16416 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16417 tg3_asic_rev(tp) == ASIC_REV_5762)
16418 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16420 if (tg3_flag(tp, 57765_PLUS) &&
16421 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16422 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16424 if (!tg3_flag(tp, 5705_PLUS) ||
16425 tg3_flag(tp, 5780_CLASS) ||
16426 tg3_flag(tp, USE_JUMBO_BDFLAG))
16427 tg3_flag_set(tp, JUMBO_CAPABLE);
16429 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16432 if (pci_is_pcie(tp->pdev)) {
16435 tg3_flag_set(tp, PCI_EXPRESS);
16437 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16438 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16439 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16440 tg3_flag_clear(tp, HW_TSO_2);
16441 tg3_flag_clear(tp, TSO_CAPABLE);
16443 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16444 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16445 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16446 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16447 tg3_flag_set(tp, CLKREQ_BUG);
16448 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16449 tg3_flag_set(tp, L1PLLPD_EN);
16451 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16452 /* BCM5785 devices are effectively PCIe devices, and should
16453 * follow PCIe codepaths, but do not have a PCIe capabilities
16456 tg3_flag_set(tp, PCI_EXPRESS);
16457 } else if (!tg3_flag(tp, 5705_PLUS) ||
16458 tg3_flag(tp, 5780_CLASS)) {
16459 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16460 if (!tp->pcix_cap) {
16461 dev_err(&tp->pdev->dev,
16462 "Cannot find PCI-X capability, aborting\n");
16466 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16467 tg3_flag_set(tp, PCIX_MODE);
16470 /* If we have an AMD 762 or VIA K8T800 chipset, write
16471 * reordering to the mailbox registers done by the host
16472 * controller can cause major troubles. We read back from
16473 * every mailbox register write to force the writes to be
16474 * posted to the chip in order.
16476 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16477 !tg3_flag(tp, PCI_EXPRESS))
16478 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16480 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16481 &tp->pci_cacheline_sz);
16482 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16483 &tp->pci_lat_timer);
16484 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16485 tp->pci_lat_timer < 64) {
16486 tp->pci_lat_timer = 64;
16487 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16488 tp->pci_lat_timer);
16491 /* Important! -- It is critical that the PCI-X hw workaround
16492 * situation is decided before the first MMIO register access.
16494 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16495 /* 5700 BX chips need to have their TX producer index
16496 * mailboxes written twice to workaround a bug.
16498 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16500 /* If we are in PCI-X mode, enable register write workaround.
16502 * The workaround is to use indirect register accesses
16503 * for all chip writes not to mailbox registers.
16505 if (tg3_flag(tp, PCIX_MODE)) {
16508 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16510 /* The chip can have it's power management PCI config
16511 * space registers clobbered due to this bug.
16512 * So explicitly force the chip into D0 here.
16514 pci_read_config_dword(tp->pdev,
16515 tp->pdev->pm_cap + PCI_PM_CTRL,
16517 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16518 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16519 pci_write_config_dword(tp->pdev,
16520 tp->pdev->pm_cap + PCI_PM_CTRL,
16523 /* Also, force SERR#/PERR# in PCI command. */
16524 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16525 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16526 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16530 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16531 tg3_flag_set(tp, PCI_HIGH_SPEED);
16532 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16533 tg3_flag_set(tp, PCI_32BIT);
16535 /* Chip-specific fixup from Broadcom driver */
16536 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16537 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16538 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16539 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16542 /* Default fast path register access methods */
16543 tp->read32 = tg3_read32;
16544 tp->write32 = tg3_write32;
16545 tp->read32_mbox = tg3_read32;
16546 tp->write32_mbox = tg3_write32;
16547 tp->write32_tx_mbox = tg3_write32;
16548 tp->write32_rx_mbox = tg3_write32;
16550 /* Various workaround register access methods */
16551 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16552 tp->write32 = tg3_write_indirect_reg32;
16553 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16554 (tg3_flag(tp, PCI_EXPRESS) &&
16555 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16557 * Back to back register writes can cause problems on these
16558 * chips, the workaround is to read back all reg writes
16559 * except those to mailbox regs.
16561 * See tg3_write_indirect_reg32().
16563 tp->write32 = tg3_write_flush_reg32;
16566 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16567 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16568 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16569 tp->write32_rx_mbox = tg3_write_flush_reg32;
16572 if (tg3_flag(tp, ICH_WORKAROUND)) {
16573 tp->read32 = tg3_read_indirect_reg32;
16574 tp->write32 = tg3_write_indirect_reg32;
16575 tp->read32_mbox = tg3_read_indirect_mbox;
16576 tp->write32_mbox = tg3_write_indirect_mbox;
16577 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16578 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16583 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16584 pci_cmd &= ~PCI_COMMAND_MEMORY;
16585 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16587 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16588 tp->read32_mbox = tg3_read32_mbox_5906;
16589 tp->write32_mbox = tg3_write32_mbox_5906;
16590 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16591 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16594 if (tp->write32 == tg3_write_indirect_reg32 ||
16595 (tg3_flag(tp, PCIX_MODE) &&
16596 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16597 tg3_asic_rev(tp) == ASIC_REV_5701)))
16598 tg3_flag_set(tp, SRAM_USE_CONFIG);
16600 /* The memory arbiter has to be enabled in order for SRAM accesses
16601 * to succeed. Normally on powerup the tg3 chip firmware will make
16602 * sure it is enabled, but other entities such as system netboot
16603 * code might disable it.
16605 val = tr32(MEMARB_MODE);
16606 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16608 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16609 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16610 tg3_flag(tp, 5780_CLASS)) {
16611 if (tg3_flag(tp, PCIX_MODE)) {
16612 pci_read_config_dword(tp->pdev,
16613 tp->pcix_cap + PCI_X_STATUS,
16615 tp->pci_fn = val & 0x7;
16617 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16618 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16619 tg3_asic_rev(tp) == ASIC_REV_5720) {
16620 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16621 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16622 val = tr32(TG3_CPMU_STATUS);
16624 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16625 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16627 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16628 TG3_CPMU_STATUS_FSHFT_5719;
16631 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16632 tp->write32_tx_mbox = tg3_write_flush_reg32;
16633 tp->write32_rx_mbox = tg3_write_flush_reg32;
16636 /* Get eeprom hw config before calling tg3_set_power_state().
16637 * In particular, the TG3_FLAG_IS_NIC flag must be
16638 * determined before calling tg3_set_power_state() so that
16639 * we know whether or not to switch out of Vaux power.
16640 * When the flag is set, it means that GPIO1 is used for eeprom
16641 * write protect and also implies that it is a LOM where GPIOs
16642 * are not used to switch power.
16644 tg3_get_eeprom_hw_cfg(tp);
16646 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16647 tg3_flag_clear(tp, TSO_CAPABLE);
16648 tg3_flag_clear(tp, TSO_BUG);
16649 tp->fw_needed = NULL;
16652 if (tg3_flag(tp, ENABLE_APE)) {
16653 /* Allow reads and writes to the
16654 * APE register and memory space.
16656 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16657 PCISTATE_ALLOW_APE_SHMEM_WR |
16658 PCISTATE_ALLOW_APE_PSPACE_WR;
16659 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16662 tg3_ape_lock_init(tp);
16665 /* Set up tp->grc_local_ctrl before calling
16666 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16667 * will bring 5700's external PHY out of reset.
16668 * It is also used as eeprom write protect on LOMs.
16670 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16671 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672 tg3_flag(tp, EEPROM_WRITE_PROT))
16673 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16674 GRC_LCLCTRL_GPIO_OUTPUT1);
16675 /* Unused GPIO3 must be driven as output on 5752 because there
16676 * are no pull-up resistors on unused GPIO pins.
16678 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16679 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16681 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16682 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16683 tg3_flag(tp, 57765_CLASS))
16684 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16686 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16687 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16688 /* Turn off the debug UART. */
16689 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16690 if (tg3_flag(tp, IS_NIC))
16691 /* Keep VMain power. */
16692 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16693 GRC_LCLCTRL_GPIO_OUTPUT0;
16696 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16697 tp->grc_local_ctrl |=
16698 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16700 /* Switch out of Vaux if it is a NIC */
16701 tg3_pwrsrc_switch_to_vmain(tp);
16703 /* Derive initial jumbo mode from MTU assigned in
16704 * ether_setup() via the alloc_etherdev() call
16706 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16707 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16709 /* Determine WakeOnLan speed to use. */
16710 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16711 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16712 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16713 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16714 tg3_flag_clear(tp, WOL_SPEED_100MB);
16716 tg3_flag_set(tp, WOL_SPEED_100MB);
16719 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16720 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16722 /* A few boards don't want Ethernet@WireSpeed phy feature */
16723 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16724 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16725 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16726 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16727 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16728 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16729 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16731 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16732 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16733 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16734 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16735 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16737 if (tg3_flag(tp, 5705_PLUS) &&
16738 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16739 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16740 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16741 !tg3_flag(tp, 57765_PLUS)) {
16742 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16743 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16744 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16745 tg3_asic_rev(tp) == ASIC_REV_5761) {
16746 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16747 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16748 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16749 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16750 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16752 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16755 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16756 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16757 tp->phy_otp = tg3_read_otp_phycfg(tp);
16758 if (tp->phy_otp == 0)
16759 tp->phy_otp = TG3_OTP_DEFAULT;
16762 if (tg3_flag(tp, CPMU_PRESENT))
16763 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16765 tp->mi_mode = MAC_MI_MODE_BASE;
16767 tp->coalesce_mode = 0;
16768 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16769 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16770 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16772 /* Set these bits to enable statistics workaround. */
16773 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16774 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16775 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16776 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16777 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16778 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16781 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16782 tg3_asic_rev(tp) == ASIC_REV_57780)
16783 tg3_flag_set(tp, USE_PHYLIB);
16785 err = tg3_mdio_init(tp);
16789 /* Initialize data/descriptor byte/word swapping. */
16790 val = tr32(GRC_MODE);
16791 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16792 tg3_asic_rev(tp) == ASIC_REV_5762)
16793 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16794 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16795 GRC_MODE_B2HRX_ENABLE |
16796 GRC_MODE_HTX2B_ENABLE |
16797 GRC_MODE_HOST_STACKUP);
16799 val &= GRC_MODE_HOST_STACKUP;
16801 tw32(GRC_MODE, val | tp->grc_mode);
16803 tg3_switch_clocks(tp);
16805 /* Clear this out for sanity. */
16806 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16808 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16809 tw32(TG3PCI_REG_BASE_ADDR, 0);
16811 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16813 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16814 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16815 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16816 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16817 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16818 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16819 void __iomem *sram_base;
16821 /* Write some dummy words into the SRAM status block
16822 * area, see if it reads back correctly. If the return
16823 * value is bad, force enable the PCIX workaround.
16825 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16827 writel(0x00000000, sram_base);
16828 writel(0x00000000, sram_base + 4);
16829 writel(0xffffffff, sram_base + 4);
16830 if (readl(sram_base) != 0x00000000)
16831 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16836 tg3_nvram_init(tp);
16838 /* If the device has an NVRAM, no need to load patch firmware */
16839 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16840 !tg3_flag(tp, NO_NVRAM))
16841 tp->fw_needed = NULL;
16843 grc_misc_cfg = tr32(GRC_MISC_CFG);
16844 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16846 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16847 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16848 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16849 tg3_flag_set(tp, IS_5788);
16851 if (!tg3_flag(tp, IS_5788) &&
16852 tg3_asic_rev(tp) != ASIC_REV_5700)
16853 tg3_flag_set(tp, TAGGED_STATUS);
16854 if (tg3_flag(tp, TAGGED_STATUS)) {
16855 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16856 HOSTCC_MODE_CLRTICK_TXBD);
16858 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16859 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16860 tp->misc_host_ctrl);
16863 /* Preserve the APE MAC_MODE bits */
16864 if (tg3_flag(tp, ENABLE_APE))
16865 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16869 if (tg3_10_100_only_device(tp, ent))
16870 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16872 err = tg3_phy_probe(tp);
16874 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16875 /* ... but do not return immediately ... */
16880 tg3_read_fw_ver(tp);
16882 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16883 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16885 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16886 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16888 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16891 /* 5700 {AX,BX} chips have a broken status block link
16892 * change bit implementation, so we must use the
16893 * status register in those cases.
16895 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16896 tg3_flag_set(tp, USE_LINKCHG_REG);
16898 tg3_flag_clear(tp, USE_LINKCHG_REG);
16900 /* The led_ctrl is set during tg3_phy_probe, here we might
16901 * have to force the link status polling mechanism based
16902 * upon subsystem IDs.
16904 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16905 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16906 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16907 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16908 tg3_flag_set(tp, USE_LINKCHG_REG);
16911 /* For all SERDES we poll the MAC status register. */
16912 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16913 tg3_flag_set(tp, POLL_SERDES);
16915 tg3_flag_clear(tp, POLL_SERDES);
16917 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16918 tg3_flag_set(tp, POLL_CPMU_LINK);
16920 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16921 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16922 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16923 tg3_flag(tp, PCIX_MODE)) {
16924 tp->rx_offset = NET_SKB_PAD;
16925 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16926 tp->rx_copy_thresh = ~(u16)0;
16930 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16931 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16932 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16934 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16936 /* Increment the rx prod index on the rx std ring by at most
16937 * 8 for these chips to workaround hw errata.
16939 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16940 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16941 tg3_asic_rev(tp) == ASIC_REV_5755)
16942 tp->rx_std_max_post = 8;
16944 if (tg3_flag(tp, ASPM_WORKAROUND))
16945 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16946 PCIE_PWR_MGMT_L1_THRESH_MSK;
16951 #ifdef CONFIG_SPARC
16952 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16954 struct net_device *dev = tp->dev;
16955 struct pci_dev *pdev = tp->pdev;
16956 struct device_node *dp = pci_device_to_OF_node(pdev);
16957 const unsigned char *addr;
16960 addr = of_get_property(dp, "local-mac-address", &len);
16961 if (addr && len == ETH_ALEN) {
16962 memcpy(dev->dev_addr, addr, ETH_ALEN);
16968 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16970 struct net_device *dev = tp->dev;
16972 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16977 static int tg3_get_device_address(struct tg3 *tp)
16979 struct net_device *dev = tp->dev;
16980 u32 hi, lo, mac_offset;
16984 #ifdef CONFIG_SPARC
16985 if (!tg3_get_macaddr_sparc(tp))
16989 if (tg3_flag(tp, IS_SSB_CORE)) {
16990 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16991 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16996 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16997 tg3_flag(tp, 5780_CLASS)) {
16998 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17000 if (tg3_nvram_lock(tp))
17001 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17003 tg3_nvram_unlock(tp);
17004 } else if (tg3_flag(tp, 5717_PLUS)) {
17005 if (tp->pci_fn & 1)
17007 if (tp->pci_fn > 1)
17008 mac_offset += 0x18c;
17009 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17012 /* First try to get it from MAC address mailbox. */
17013 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17014 if ((hi >> 16) == 0x484b) {
17015 dev->dev_addr[0] = (hi >> 8) & 0xff;
17016 dev->dev_addr[1] = (hi >> 0) & 0xff;
17018 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17019 dev->dev_addr[2] = (lo >> 24) & 0xff;
17020 dev->dev_addr[3] = (lo >> 16) & 0xff;
17021 dev->dev_addr[4] = (lo >> 8) & 0xff;
17022 dev->dev_addr[5] = (lo >> 0) & 0xff;
17024 /* Some old bootcode may report a 0 MAC address in SRAM */
17025 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17028 /* Next, try NVRAM. */
17029 if (!tg3_flag(tp, NO_NVRAM) &&
17030 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17031 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17032 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17033 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17035 /* Finally just fetch it out of the MAC control regs. */
17037 hi = tr32(MAC_ADDR_0_HIGH);
17038 lo = tr32(MAC_ADDR_0_LOW);
17040 dev->dev_addr[5] = lo & 0xff;
17041 dev->dev_addr[4] = (lo >> 8) & 0xff;
17042 dev->dev_addr[3] = (lo >> 16) & 0xff;
17043 dev->dev_addr[2] = (lo >> 24) & 0xff;
17044 dev->dev_addr[1] = hi & 0xff;
17045 dev->dev_addr[0] = (hi >> 8) & 0xff;
17049 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17050 #ifdef CONFIG_SPARC
17051 if (!tg3_get_default_macaddr_sparc(tp))
17059 #define BOUNDARY_SINGLE_CACHELINE 1
17060 #define BOUNDARY_MULTI_CACHELINE 2
17062 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17064 int cacheline_size;
17068 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17070 cacheline_size = 1024;
17072 cacheline_size = (int) byte * 4;
17074 /* On 5703 and later chips, the boundary bits have no
17077 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17078 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17079 !tg3_flag(tp, PCI_EXPRESS))
17082 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17083 goal = BOUNDARY_MULTI_CACHELINE;
17085 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17086 goal = BOUNDARY_SINGLE_CACHELINE;
17092 if (tg3_flag(tp, 57765_PLUS)) {
17093 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17100 /* PCI controllers on most RISC systems tend to disconnect
17101 * when a device tries to burst across a cache-line boundary.
17102 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17104 * Unfortunately, for PCI-E there are only limited
17105 * write-side controls for this, and thus for reads
17106 * we will still get the disconnects. We'll also waste
17107 * these PCI cycles for both read and write for chips
17108 * other than 5700 and 5701 which do not implement the
17111 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17112 switch (cacheline_size) {
17117 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17118 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17119 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17121 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17122 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17127 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17128 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17132 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17133 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17136 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17137 switch (cacheline_size) {
17141 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17142 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17143 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17149 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17150 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17154 switch (cacheline_size) {
17156 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17157 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17158 DMA_RWCTRL_WRITE_BNDRY_16);
17163 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17165 DMA_RWCTRL_WRITE_BNDRY_32);
17170 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17172 DMA_RWCTRL_WRITE_BNDRY_64);
17177 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17179 DMA_RWCTRL_WRITE_BNDRY_128);
17184 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17185 DMA_RWCTRL_WRITE_BNDRY_256);
17188 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17189 DMA_RWCTRL_WRITE_BNDRY_512);
17193 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17194 DMA_RWCTRL_WRITE_BNDRY_1024);
17203 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17204 int size, bool to_device)
17206 struct tg3_internal_buffer_desc test_desc;
17207 u32 sram_dma_descs;
17210 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17212 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17213 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17214 tw32(RDMAC_STATUS, 0);
17215 tw32(WDMAC_STATUS, 0);
17217 tw32(BUFMGR_MODE, 0);
17218 tw32(FTQ_RESET, 0);
17220 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17221 test_desc.addr_lo = buf_dma & 0xffffffff;
17222 test_desc.nic_mbuf = 0x00002100;
17223 test_desc.len = size;
17226 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17227 * the *second* time the tg3 driver was getting loaded after an
17230 * Broadcom tells me:
17231 * ...the DMA engine is connected to the GRC block and a DMA
17232 * reset may affect the GRC block in some unpredictable way...
17233 * The behavior of resets to individual blocks has not been tested.
17235 * Broadcom noted the GRC reset will also reset all sub-components.
17238 test_desc.cqid_sqid = (13 << 8) | 2;
17240 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17243 test_desc.cqid_sqid = (16 << 8) | 7;
17245 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17248 test_desc.flags = 0x00000005;
17250 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17253 val = *(((u32 *)&test_desc) + i);
17254 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17255 sram_dma_descs + (i * sizeof(u32)));
17256 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17258 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17261 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17263 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17266 for (i = 0; i < 40; i++) {
17270 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17272 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17273 if ((val & 0xffff) == sram_dma_descs) {
17284 #define TEST_BUFFER_SIZE 0x2000
17286 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17287 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17291 static int tg3_test_dma(struct tg3 *tp)
17293 dma_addr_t buf_dma;
17294 u32 *buf, saved_dma_rwctrl;
17297 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17298 &buf_dma, GFP_KERNEL);
17304 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17305 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17307 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17309 if (tg3_flag(tp, 57765_PLUS))
17312 if (tg3_flag(tp, PCI_EXPRESS)) {
17313 /* DMA read watermark not used on PCIE */
17314 tp->dma_rwctrl |= 0x00180000;
17315 } else if (!tg3_flag(tp, PCIX_MODE)) {
17316 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17317 tg3_asic_rev(tp) == ASIC_REV_5750)
17318 tp->dma_rwctrl |= 0x003f0000;
17320 tp->dma_rwctrl |= 0x003f000f;
17322 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17323 tg3_asic_rev(tp) == ASIC_REV_5704) {
17324 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17325 u32 read_water = 0x7;
17327 /* If the 5704 is behind the EPB bridge, we can
17328 * do the less restrictive ONE_DMA workaround for
17329 * better performance.
17331 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17332 tg3_asic_rev(tp) == ASIC_REV_5704)
17333 tp->dma_rwctrl |= 0x8000;
17334 else if (ccval == 0x6 || ccval == 0x7)
17335 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17337 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17339 /* Set bit 23 to enable PCIX hw bug fix */
17341 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17342 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17344 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17345 /* 5780 always in PCIX mode */
17346 tp->dma_rwctrl |= 0x00144000;
17347 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17348 /* 5714 always in PCIX mode */
17349 tp->dma_rwctrl |= 0x00148000;
17351 tp->dma_rwctrl |= 0x001b000f;
17354 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17355 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17357 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17358 tg3_asic_rev(tp) == ASIC_REV_5704)
17359 tp->dma_rwctrl &= 0xfffffff0;
17361 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17362 tg3_asic_rev(tp) == ASIC_REV_5701) {
17363 /* Remove this if it causes problems for some boards. */
17364 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17366 /* On 5700/5701 chips, we need to set this bit.
17367 * Otherwise the chip will issue cacheline transactions
17368 * to streamable DMA memory with not all the byte
17369 * enables turned on. This is an error on several
17370 * RISC PCI controllers, in particular sparc64.
17372 * On 5703/5704 chips, this bit has been reassigned
17373 * a different meaning. In particular, it is used
17374 * on those chips to enable a PCI-X workaround.
17376 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17379 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17382 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17383 tg3_asic_rev(tp) != ASIC_REV_5701)
17386 /* It is best to perform DMA test with maximum write burst size
17387 * to expose the 5700/5701 write DMA bug.
17389 saved_dma_rwctrl = tp->dma_rwctrl;
17390 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17391 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17396 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17399 /* Send the buffer to the chip. */
17400 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17402 dev_err(&tp->pdev->dev,
17403 "%s: Buffer write failed. err = %d\n",
17408 /* Now read it back. */
17409 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17411 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17412 "err = %d\n", __func__, ret);
17417 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17421 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17422 DMA_RWCTRL_WRITE_BNDRY_16) {
17423 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17424 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17425 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17428 dev_err(&tp->pdev->dev,
17429 "%s: Buffer corrupted on read back! "
17430 "(%d != %d)\n", __func__, p[i], i);
17436 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17442 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17443 DMA_RWCTRL_WRITE_BNDRY_16) {
17444 /* DMA test passed without adjusting DMA boundary,
17445 * now look for chipsets that are known to expose the
17446 * DMA bug without failing the test.
17448 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17449 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17450 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17452 /* Safe to use the calculated DMA boundary. */
17453 tp->dma_rwctrl = saved_dma_rwctrl;
17456 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17460 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17465 static void tg3_init_bufmgr_config(struct tg3 *tp)
17467 if (tg3_flag(tp, 57765_PLUS)) {
17468 tp->bufmgr_config.mbuf_read_dma_low_water =
17469 DEFAULT_MB_RDMA_LOW_WATER_5705;
17470 tp->bufmgr_config.mbuf_mac_rx_low_water =
17471 DEFAULT_MB_MACRX_LOW_WATER_57765;
17472 tp->bufmgr_config.mbuf_high_water =
17473 DEFAULT_MB_HIGH_WATER_57765;
17475 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17476 DEFAULT_MB_RDMA_LOW_WATER_5705;
17477 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17478 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17479 tp->bufmgr_config.mbuf_high_water_jumbo =
17480 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17481 } else if (tg3_flag(tp, 5705_PLUS)) {
17482 tp->bufmgr_config.mbuf_read_dma_low_water =
17483 DEFAULT_MB_RDMA_LOW_WATER_5705;
17484 tp->bufmgr_config.mbuf_mac_rx_low_water =
17485 DEFAULT_MB_MACRX_LOW_WATER_5705;
17486 tp->bufmgr_config.mbuf_high_water =
17487 DEFAULT_MB_HIGH_WATER_5705;
17488 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17489 tp->bufmgr_config.mbuf_mac_rx_low_water =
17490 DEFAULT_MB_MACRX_LOW_WATER_5906;
17491 tp->bufmgr_config.mbuf_high_water =
17492 DEFAULT_MB_HIGH_WATER_5906;
17495 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17496 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17497 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17498 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17499 tp->bufmgr_config.mbuf_high_water_jumbo =
17500 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17502 tp->bufmgr_config.mbuf_read_dma_low_water =
17503 DEFAULT_MB_RDMA_LOW_WATER;
17504 tp->bufmgr_config.mbuf_mac_rx_low_water =
17505 DEFAULT_MB_MACRX_LOW_WATER;
17506 tp->bufmgr_config.mbuf_high_water =
17507 DEFAULT_MB_HIGH_WATER;
17509 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17510 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17511 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17512 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17513 tp->bufmgr_config.mbuf_high_water_jumbo =
17514 DEFAULT_MB_HIGH_WATER_JUMBO;
17517 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17518 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17521 static char *tg3_phy_string(struct tg3 *tp)
17523 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17524 case TG3_PHY_ID_BCM5400: return "5400";
17525 case TG3_PHY_ID_BCM5401: return "5401";
17526 case TG3_PHY_ID_BCM5411: return "5411";
17527 case TG3_PHY_ID_BCM5701: return "5701";
17528 case TG3_PHY_ID_BCM5703: return "5703";
17529 case TG3_PHY_ID_BCM5704: return "5704";
17530 case TG3_PHY_ID_BCM5705: return "5705";
17531 case TG3_PHY_ID_BCM5750: return "5750";
17532 case TG3_PHY_ID_BCM5752: return "5752";
17533 case TG3_PHY_ID_BCM5714: return "5714";
17534 case TG3_PHY_ID_BCM5780: return "5780";
17535 case TG3_PHY_ID_BCM5755: return "5755";
17536 case TG3_PHY_ID_BCM5787: return "5787";
17537 case TG3_PHY_ID_BCM5784: return "5784";
17538 case TG3_PHY_ID_BCM5756: return "5722/5756";
17539 case TG3_PHY_ID_BCM5906: return "5906";
17540 case TG3_PHY_ID_BCM5761: return "5761";
17541 case TG3_PHY_ID_BCM5718C: return "5718C";
17542 case TG3_PHY_ID_BCM5718S: return "5718S";
17543 case TG3_PHY_ID_BCM57765: return "57765";
17544 case TG3_PHY_ID_BCM5719C: return "5719C";
17545 case TG3_PHY_ID_BCM5720C: return "5720C";
17546 case TG3_PHY_ID_BCM5762: return "5762C";
17547 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17548 case 0: return "serdes";
17549 default: return "unknown";
17553 static char *tg3_bus_string(struct tg3 *tp, char *str)
17555 if (tg3_flag(tp, PCI_EXPRESS)) {
17556 strcpy(str, "PCI Express");
17558 } else if (tg3_flag(tp, PCIX_MODE)) {
17559 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17561 strcpy(str, "PCIX:");
17563 if ((clock_ctrl == 7) ||
17564 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17565 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17566 strcat(str, "133MHz");
17567 else if (clock_ctrl == 0)
17568 strcat(str, "33MHz");
17569 else if (clock_ctrl == 2)
17570 strcat(str, "50MHz");
17571 else if (clock_ctrl == 4)
17572 strcat(str, "66MHz");
17573 else if (clock_ctrl == 6)
17574 strcat(str, "100MHz");
17576 strcpy(str, "PCI:");
17577 if (tg3_flag(tp, PCI_HIGH_SPEED))
17578 strcat(str, "66MHz");
17580 strcat(str, "33MHz");
17582 if (tg3_flag(tp, PCI_32BIT))
17583 strcat(str, ":32-bit");
17585 strcat(str, ":64-bit");
17589 static void tg3_init_coal(struct tg3 *tp)
17591 struct ethtool_coalesce *ec = &tp->coal;
17593 memset(ec, 0, sizeof(*ec));
17594 ec->cmd = ETHTOOL_GCOALESCE;
17595 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17596 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17597 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17598 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17599 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17600 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17601 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17602 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17603 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17605 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17606 HOSTCC_MODE_CLRTICK_TXBD)) {
17607 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17608 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17609 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17610 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17613 if (tg3_flag(tp, 5705_PLUS)) {
17614 ec->rx_coalesce_usecs_irq = 0;
17615 ec->tx_coalesce_usecs_irq = 0;
17616 ec->stats_block_coalesce_usecs = 0;
17620 static int tg3_init_one(struct pci_dev *pdev,
17621 const struct pci_device_id *ent)
17623 struct net_device *dev;
17626 u32 sndmbx, rcvmbx, intmbx;
17628 u64 dma_mask, persist_dma_mask;
17629 netdev_features_t features = 0;
17631 printk_once(KERN_INFO "%s\n", version);
17633 err = pci_enable_device(pdev);
17635 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17639 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17641 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17642 goto err_out_disable_pdev;
17645 pci_set_master(pdev);
17647 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17650 goto err_out_free_res;
17653 SET_NETDEV_DEV(dev, &pdev->dev);
17655 tp = netdev_priv(dev);
17658 tp->rx_mode = TG3_DEF_RX_MODE;
17659 tp->tx_mode = TG3_DEF_TX_MODE;
17661 tp->pcierr_recovery = false;
17664 tp->msg_enable = tg3_debug;
17666 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17668 if (pdev_is_ssb_gige_core(pdev)) {
17669 tg3_flag_set(tp, IS_SSB_CORE);
17670 if (ssb_gige_must_flush_posted_writes(pdev))
17671 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17672 if (ssb_gige_one_dma_at_once(pdev))
17673 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17674 if (ssb_gige_have_roboswitch(pdev)) {
17675 tg3_flag_set(tp, USE_PHYLIB);
17676 tg3_flag_set(tp, ROBOSWITCH);
17678 if (ssb_gige_is_rgmii(pdev))
17679 tg3_flag_set(tp, RGMII_MODE);
17682 /* The word/byte swap controls here control register access byte
17683 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17686 tp->misc_host_ctrl =
17687 MISC_HOST_CTRL_MASK_PCI_INT |
17688 MISC_HOST_CTRL_WORD_SWAP |
17689 MISC_HOST_CTRL_INDIR_ACCESS |
17690 MISC_HOST_CTRL_PCISTATE_RW;
17692 /* The NONFRM (non-frame) byte/word swap controls take effect
17693 * on descriptor entries, anything which isn't packet data.
17695 * The StrongARM chips on the board (one for tx, one for rx)
17696 * are running in big-endian mode.
17698 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17699 GRC_MODE_WSWAP_NONFRM_DATA);
17700 #ifdef __BIG_ENDIAN
17701 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17703 spin_lock_init(&tp->lock);
17704 spin_lock_init(&tp->indirect_lock);
17705 INIT_WORK(&tp->reset_task, tg3_reset_task);
17707 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17709 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17711 goto err_out_free_dev;
17714 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17715 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17716 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17717 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17718 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17719 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17720 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17721 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17722 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17723 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17724 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17725 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17726 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17729 tg3_flag_set(tp, ENABLE_APE);
17730 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17731 if (!tp->aperegs) {
17732 dev_err(&pdev->dev,
17733 "Cannot map APE registers, aborting\n");
17735 goto err_out_iounmap;
17739 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17740 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17742 dev->ethtool_ops = &tg3_ethtool_ops;
17743 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17744 dev->netdev_ops = &tg3_netdev_ops;
17745 dev->irq = pdev->irq;
17747 err = tg3_get_invariants(tp, ent);
17749 dev_err(&pdev->dev,
17750 "Problem fetching invariants of chip, aborting\n");
17751 goto err_out_apeunmap;
17754 /* The EPB bridge inside 5714, 5715, and 5780 and any
17755 * device behind the EPB cannot support DMA addresses > 40-bit.
17756 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17757 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17758 * do DMA address check in tg3_start_xmit().
17760 if (tg3_flag(tp, IS_5788))
17761 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17762 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17763 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17764 #ifdef CONFIG_HIGHMEM
17765 dma_mask = DMA_BIT_MASK(64);
17768 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17770 /* Configure DMA attributes. */
17771 if (dma_mask > DMA_BIT_MASK(32)) {
17772 err = pci_set_dma_mask(pdev, dma_mask);
17774 features |= NETIF_F_HIGHDMA;
17775 err = pci_set_consistent_dma_mask(pdev,
17778 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17779 "DMA for consistent allocations\n");
17780 goto err_out_apeunmap;
17784 if (err || dma_mask == DMA_BIT_MASK(32)) {
17785 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17787 dev_err(&pdev->dev,
17788 "No usable DMA configuration, aborting\n");
17789 goto err_out_apeunmap;
17793 tg3_init_bufmgr_config(tp);
17795 /* 5700 B0 chips do not support checksumming correctly due
17796 * to hardware bugs.
17798 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17799 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17801 if (tg3_flag(tp, 5755_PLUS))
17802 features |= NETIF_F_IPV6_CSUM;
17805 /* TSO is on by default on chips that support hardware TSO.
17806 * Firmware TSO on older chips gives lower performance, so it
17807 * is off by default, but can be enabled using ethtool.
17809 if ((tg3_flag(tp, HW_TSO_1) ||
17810 tg3_flag(tp, HW_TSO_2) ||
17811 tg3_flag(tp, HW_TSO_3)) &&
17812 (features & NETIF_F_IP_CSUM))
17813 features |= NETIF_F_TSO;
17814 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17815 if (features & NETIF_F_IPV6_CSUM)
17816 features |= NETIF_F_TSO6;
17817 if (tg3_flag(tp, HW_TSO_3) ||
17818 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17819 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17820 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17821 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17822 tg3_asic_rev(tp) == ASIC_REV_57780)
17823 features |= NETIF_F_TSO_ECN;
17826 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17827 NETIF_F_HW_VLAN_CTAG_RX;
17828 dev->vlan_features |= features;
17831 * Add loopback capability only for a subset of devices that support
17832 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17833 * loopback for the remaining devices.
17835 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17836 !tg3_flag(tp, CPMU_PRESENT))
17837 /* Add the loopback capability */
17838 features |= NETIF_F_LOOPBACK;
17840 dev->hw_features |= features;
17841 dev->priv_flags |= IFF_UNICAST_FLT;
17843 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17844 dev->min_mtu = TG3_MIN_MTU;
17845 dev->max_mtu = TG3_MAX_MTU(tp);
17847 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17848 !tg3_flag(tp, TSO_CAPABLE) &&
17849 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17850 tg3_flag_set(tp, MAX_RXPEND_64);
17851 tp->rx_pending = 63;
17854 err = tg3_get_device_address(tp);
17856 dev_err(&pdev->dev,
17857 "Could not obtain valid ethernet address, aborting\n");
17858 goto err_out_apeunmap;
17861 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17862 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17863 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17864 for (i = 0; i < tp->irq_max; i++) {
17865 struct tg3_napi *tnapi = &tp->napi[i];
17868 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17870 tnapi->int_mbox = intmbx;
17876 tnapi->consmbox = rcvmbx;
17877 tnapi->prodmbox = sndmbx;
17880 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17882 tnapi->coal_now = HOSTCC_MODE_NOW;
17884 if (!tg3_flag(tp, SUPPORT_MSIX))
17888 * If we support MSIX, we'll be using RSS. If we're using
17889 * RSS, the first vector only handles link interrupts and the
17890 * remaining vectors handle rx and tx interrupts. Reuse the
17891 * mailbox values for the next iteration. The values we setup
17892 * above are still useful for the single vectored mode.
17906 * Reset chip in case UNDI or EFI driver did not shutdown
17907 * DMA self test will enable WDMAC and we'll see (spurious)
17908 * pending DMA on the PCI bus at that point.
17910 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17911 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17912 tg3_full_lock(tp, 0);
17913 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17914 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17915 tg3_full_unlock(tp);
17918 err = tg3_test_dma(tp);
17920 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17921 goto err_out_apeunmap;
17926 pci_set_drvdata(pdev, dev);
17928 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17929 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17930 tg3_asic_rev(tp) == ASIC_REV_5762)
17931 tg3_flag_set(tp, PTP_CAPABLE);
17933 tg3_timer_init(tp);
17935 tg3_carrier_off(tp);
17937 err = register_netdev(dev);
17939 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17940 goto err_out_apeunmap;
17943 if (tg3_flag(tp, PTP_CAPABLE)) {
17945 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17947 if (IS_ERR(tp->ptp_clock))
17948 tp->ptp_clock = NULL;
17951 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17952 tp->board_part_number,
17953 tg3_chip_rev_id(tp),
17954 tg3_bus_string(tp, str),
17957 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17960 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17961 ethtype = "10/100Base-TX";
17962 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17963 ethtype = "1000Base-SX";
17965 ethtype = "10/100/1000Base-T";
17967 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17968 "(WireSpeed[%d], EEE[%d])\n",
17969 tg3_phy_string(tp), ethtype,
17970 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17971 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17974 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17975 (dev->features & NETIF_F_RXCSUM) != 0,
17976 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17977 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17978 tg3_flag(tp, ENABLE_ASF) != 0,
17979 tg3_flag(tp, TSO_CAPABLE) != 0);
17980 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17982 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17983 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17985 pci_save_state(pdev);
17991 iounmap(tp->aperegs);
17992 tp->aperegs = NULL;
18005 pci_release_regions(pdev);
18007 err_out_disable_pdev:
18008 if (pci_is_enabled(pdev))
18009 pci_disable_device(pdev);
18013 static void tg3_remove_one(struct pci_dev *pdev)
18015 struct net_device *dev = pci_get_drvdata(pdev);
18018 struct tg3 *tp = netdev_priv(dev);
18022 release_firmware(tp->fw);
18024 tg3_reset_task_cancel(tp);
18026 if (tg3_flag(tp, USE_PHYLIB)) {
18031 unregister_netdev(dev);
18033 iounmap(tp->aperegs);
18034 tp->aperegs = NULL;
18041 pci_release_regions(pdev);
18042 pci_disable_device(pdev);
18046 #ifdef CONFIG_PM_SLEEP
18047 static int tg3_suspend(struct device *device)
18049 struct pci_dev *pdev = to_pci_dev(device);
18050 struct net_device *dev = pci_get_drvdata(pdev);
18051 struct tg3 *tp = netdev_priv(dev);
18056 if (!netif_running(dev))
18059 tg3_reset_task_cancel(tp);
18061 tg3_netif_stop(tp);
18063 tg3_timer_stop(tp);
18065 tg3_full_lock(tp, 1);
18066 tg3_disable_ints(tp);
18067 tg3_full_unlock(tp);
18069 netif_device_detach(dev);
18071 tg3_full_lock(tp, 0);
18072 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18073 tg3_flag_clear(tp, INIT_COMPLETE);
18074 tg3_full_unlock(tp);
18076 err = tg3_power_down_prepare(tp);
18080 tg3_full_lock(tp, 0);
18082 tg3_flag_set(tp, INIT_COMPLETE);
18083 err2 = tg3_restart_hw(tp, true);
18087 tg3_timer_start(tp);
18089 netif_device_attach(dev);
18090 tg3_netif_start(tp);
18093 tg3_full_unlock(tp);
18104 static int tg3_resume(struct device *device)
18106 struct pci_dev *pdev = to_pci_dev(device);
18107 struct net_device *dev = pci_get_drvdata(pdev);
18108 struct tg3 *tp = netdev_priv(dev);
18113 if (!netif_running(dev))
18116 netif_device_attach(dev);
18118 tg3_full_lock(tp, 0);
18120 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18122 tg3_flag_set(tp, INIT_COMPLETE);
18123 err = tg3_restart_hw(tp,
18124 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18128 tg3_timer_start(tp);
18130 tg3_netif_start(tp);
18133 tg3_full_unlock(tp);
18142 #endif /* CONFIG_PM_SLEEP */
18144 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18146 static void tg3_shutdown(struct pci_dev *pdev)
18148 struct net_device *dev = pci_get_drvdata(pdev);
18149 struct tg3 *tp = netdev_priv(dev);
18152 netif_device_detach(dev);
18154 if (netif_running(dev))
18157 if (system_state == SYSTEM_POWER_OFF)
18158 tg3_power_down(tp);
18164 * tg3_io_error_detected - called when PCI error is detected
18165 * @pdev: Pointer to PCI device
18166 * @state: The current pci connection state
18168 * This function is called after a PCI bus error affecting
18169 * this device has been detected.
18171 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18172 pci_channel_state_t state)
18174 struct net_device *netdev = pci_get_drvdata(pdev);
18175 struct tg3 *tp = netdev_priv(netdev);
18176 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18178 netdev_info(netdev, "PCI I/O error detected\n");
18182 /* Could be second call or maybe we don't have netdev yet */
18183 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18186 /* We needn't recover from permanent error */
18187 if (state == pci_channel_io_frozen)
18188 tp->pcierr_recovery = true;
18192 tg3_netif_stop(tp);
18194 tg3_timer_stop(tp);
18196 /* Want to make sure that the reset task doesn't run */
18197 tg3_reset_task_cancel(tp);
18199 netif_device_detach(netdev);
18201 /* Clean up software state, even if MMIO is blocked */
18202 tg3_full_lock(tp, 0);
18203 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18204 tg3_full_unlock(tp);
18207 if (state == pci_channel_io_perm_failure) {
18209 tg3_napi_enable(tp);
18212 err = PCI_ERS_RESULT_DISCONNECT;
18214 pci_disable_device(pdev);
18223 * tg3_io_slot_reset - called after the pci bus has been reset.
18224 * @pdev: Pointer to PCI device
18226 * Restart the card from scratch, as if from a cold-boot.
18227 * At this point, the card has exprienced a hard reset,
18228 * followed by fixups by BIOS, and has its config space
18229 * set up identically to what it was at cold boot.
18231 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18233 struct net_device *netdev = pci_get_drvdata(pdev);
18234 struct tg3 *tp = netdev_priv(netdev);
18235 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18240 if (pci_enable_device(pdev)) {
18241 dev_err(&pdev->dev,
18242 "Cannot re-enable PCI device after reset.\n");
18246 pci_set_master(pdev);
18247 pci_restore_state(pdev);
18248 pci_save_state(pdev);
18250 if (!netdev || !netif_running(netdev)) {
18251 rc = PCI_ERS_RESULT_RECOVERED;
18255 err = tg3_power_up(tp);
18259 rc = PCI_ERS_RESULT_RECOVERED;
18262 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18263 tg3_napi_enable(tp);
18272 * tg3_io_resume - called when traffic can start flowing again.
18273 * @pdev: Pointer to PCI device
18275 * This callback is called when the error recovery driver tells
18276 * us that its OK to resume normal operation.
18278 static void tg3_io_resume(struct pci_dev *pdev)
18280 struct net_device *netdev = pci_get_drvdata(pdev);
18281 struct tg3 *tp = netdev_priv(netdev);
18286 if (!netdev || !netif_running(netdev))
18289 tg3_full_lock(tp, 0);
18290 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18291 tg3_flag_set(tp, INIT_COMPLETE);
18292 err = tg3_restart_hw(tp, true);
18294 tg3_full_unlock(tp);
18295 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18299 netif_device_attach(netdev);
18301 tg3_timer_start(tp);
18303 tg3_netif_start(tp);
18305 tg3_full_unlock(tp);
18310 tp->pcierr_recovery = false;
18314 static const struct pci_error_handlers tg3_err_handler = {
18315 .error_detected = tg3_io_error_detected,
18316 .slot_reset = tg3_io_slot_reset,
18317 .resume = tg3_io_resume
18320 static struct pci_driver tg3_driver = {
18321 .name = DRV_MODULE_NAME,
18322 .id_table = tg3_pci_tbl,
18323 .probe = tg3_init_one,
18324 .remove = tg3_remove_one,
18325 .err_handler = &tg3_err_handler,
18326 .driver.pm = &tg3_pm_ops,
18327 .shutdown = tg3_shutdown,
18330 module_pci_driver(tg3_driver);