GNU Linux-libre 4.4.285-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.2.6"
62 #define DRV_MODULE_RELDATE      "January 29, 2014"
63 #define FW_MIPS_FILE_06         "/*(DEBLOBBED)*/"
64 #define FW_RV2P_FILE_06         "/*(DEBLOBBED)*/"
65 #define FW_MIPS_FILE_09         "/*(DEBLOBBED)*/"
66 #define FW_RV2P_FILE_09_Ax      "/*(DEBLOBBED)*/"
67 #define FW_RV2P_FILE_09         "/*(DEBLOBBED)*/"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
/* Banner printed once at probe time. */
static char version[] =
	"QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/*(DEBLOBBED)*/

/* Module parameter: non-zero forces legacy INTx instead of MSI/MSI-X. */
static int disable_msi = 0;

module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
87
/* Board identifiers; values double as indices into board_info[] below
 * and are stored in the driver_data field of bnx2_pci_tbl entries.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
101
/* Human-readable board names, indexed by board_t above; the order of
 * entries must match the enum exactly.
 */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
118
/* PCI device ID table.  The HP OEM entries (specific subvendor/subdevice)
 * must precede the generic PCI_ANY_ID entries for the same device ID so
 * they match first.  The last field is the board_t driver_data.
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* constant; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
144
/* Table of supported NVRAM/flash parts for pre-5709 chips.  Each entry's
 * trailing fields are flags, page_bits, page_size, addr_mask, total_size
 * and name (cf. the designated initializers of flash_5709 below); the
 * leading five words are per-part controller configuration values --
 * presumably strapping/access-command words, see struct flash_spec in
 * bnx2.h for the authoritative field names.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
233
/* The 5709/5716 family has a single fixed flash layout, so it gets one
 * dedicated spec instead of a flash_table lookup.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
242
243 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
244
245 static void bnx2_init_napi(struct bnx2 *bp);
246 static void bnx2_del_napi(struct bnx2 *bp);
247
248 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
249 {
250         u32 diff;
251
252         /* Tell compiler to fetch tx_prod and tx_cons from memory. */
253         barrier();
254
255         /* The ring uses 256 indices for 255 entries, one of them
256          * needs to be skipped.
257          */
258         diff = txr->tx_prod - txr->tx_cons;
259         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
260                 diff &= 0xffff;
261                 if (diff == BNX2_TX_DESC_CNT)
262                         diff = BNX2_MAX_TX_DESC_CNT;
263         }
264         return bp->tx_ring_size - diff;
265 }
266
/* Read a device register indirectly through the PCI config register
 * window.  indirect_lock serializes the address-write / data-read pair
 * against other indirect accesses.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
278
/* Write a device register indirectly through the PCI config register
 * window; counterpart of bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
287
/* Write a word into the firmware shared-memory region at @offset. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
293
/* Read a word from the firmware shared-memory region at @offset. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
299
/* Write @val into context memory at @cid_addr + @offset.  The 5709 uses
 * a request/poll interface (up to 5 x 5us for the write to complete);
 * older chips take a direct address/data register pair.  Serialized by
 * indirect_lock like the other indirect accessors.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Poll for the hardware to clear the write-request bit. */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
323
324 #ifdef BCM_CNIC
/* Control hook exported to the CNIC driver: lets it perform indirect
 * register and context-memory accesses through this driver.
 * Returns 0 on success, -EINVAL for an unknown command.
 */
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
346
/* Describe to the CNIC driver which interrupt vector and status block it
 * should use.  With MSI-X, CNIC gets its own vector (the one after the
 * bp->irq_nvecs vectors used by this driver); otherwise it shares vector
 * 0 and polls via cnic_tag/cnic_present on the first napi instance.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
373
/* CNIC registration callback.  Rejects a NULL ops table, double
 * registration, and devices whose firmware reports no iSCSI connection
 * support.  Publishes @ops with rcu_assign_pointer so readers see a
 * fully-initialized structure.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
399
/* CNIC unregistration callback.  Clears the ops pointer under cnic_lock,
 * then waits for in-flight RCU readers before returning so the CNIC
 * module can safely go away.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
414
/* Probe hook called by the CNIC driver.  Returns the device's
 * cnic_eth_dev descriptor (with driver callbacks filled in), or NULL if
 * the firmware reported no iSCSI connections (max_iscsi_conn == 0).
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
433
/* Tell a registered CNIC driver (if any) to stop.  cnic_lock both
 * protects the ops pointer and keeps the callee from unregistering
 * while we call into it.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
449
/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode
 * the shared status-block tag is refreshed first so CNIC polling resumes
 * from the current status index.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
470
471 #else
472
/* No-op stubs used when the CNIC driver is not configured (BCM_CNIC
 * unset), so callers need no #ifdefs.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
482
483 #endif
484
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the MAC is auto-polling the PHY, polling is temporarily disabled
 * around the manual access and re-enabled afterwards (40us settle time
 * after each mode change).  The read completion is polled for up to
 * 50 x 10us.  Returns 0 on success or -EBUSY on timeout, in which case
 * *val is set to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
541
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * access, completion is polled for up to 50 x 10us, and -EBUSY is
 * returned on timeout (0 on success).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
590
/* Mask interrupts on every vector; the trailing read flushes the posted
 * writes to the device.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
604
/* Unmask interrupts on every vector.  For each vector the current status
 * index is acked twice: first with MASK_INT still set, then without it to
 * actually re-enable the interrupt.  Finally COAL_NOW is set so the host
 * coalescing block fires immediately, catching any events that arrived
 * while interrupts were masked.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
625
/* Mask interrupts and wait for in-flight handlers on all vectors to
 * finish.  intr_sem is raised first so the ISR fast path bails out even
 * before the hardware mask takes effect; if the device is not running
 * there is nothing to disable or wait for.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
639
640 static void
641 bnx2_napi_disable(struct bnx2 *bp)
642 {
643         int i;
644
645         for (i = 0; i < bp->irq_nvecs; i++)
646                 napi_disable(&bp->bnx2_napi[i].napi);
647 }
648
649 static void
650 bnx2_napi_enable(struct bnx2 *bp)
651 {
652         int i;
653
654         for (i = 0; i < bp->irq_nvecs; i++)
655                 napi_enable(&bp->bnx2_napi[i].napi);
656 }
657
/* Quiesce the interface: optionally stop CNIC, halt NAPI and the TX
 * queues, then mask and synchronize interrupts.  Carrier is forced off
 * so the stack does not declare a TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
670
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the call
 * that drops it to zero actually restarts the interface.  phy_lock
 * guards the link_up test so carrier state matches the PHY.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
688
689 static void
690 bnx2_free_tx_mem(struct bnx2 *bp)
691 {
692         int i;
693
694         for (i = 0; i < bp->num_tx_rings; i++) {
695                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
696                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
697
698                 if (txr->tx_desc_ring) {
699                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
700                                           txr->tx_desc_ring,
701                                           txr->tx_desc_mapping);
702                         txr->tx_desc_ring = NULL;
703                 }
704                 kfree(txr->tx_buf_ring);
705                 txr->tx_buf_ring = NULL;
706         }
707 }
708
/* Free, for every RX ring, the per-page DMA descriptor rings, the
 * vmalloc'ed software buffer ring, and the same pair for the RX page
 * rings.  All pointers are NULL-checked and cleared, so this is safe to
 * call on a partially-allocated device.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
740
/* Allocate, for every TX ring, the software buffer ring (kzalloc) and
 * the DMA descriptor ring.  Returns 0 on success or -ENOMEM; on failure
 * the caller is expected to clean up via bnx2_free_tx_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
762
/* Allocate, for every RX ring, the vmalloc'ed software buffer ring, the
 * per-page DMA descriptor rings, and (when rx_pg_ring_size is non-zero)
 * the corresponding page-ring structures.  Returns 0 on success or
 * -ENOMEM; partial allocations are released by bnx2_free_rx_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is 0 when page rings are unused, so this
		 * loop simply does not run in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
810
811 static void
812 bnx2_free_stats_blk(struct net_device *dev)
813 {
814         struct bnx2 *bp = netdev_priv(dev);
815
816         if (bp->status_blk) {
817                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
818                                   bp->status_blk,
819                                   bp->status_blk_mapping);
820                 bp->status_blk = NULL;
821                 bp->stats_blk = NULL;
822         }
823 }
824
/* Allocate the status block and statistics block as a single zeroed DMA
 * coherent region.  MSI-X capable devices reserve room for one aligned
 * status block per hardware vector.  The statistics block sits right
 * after the (cache-line aligned) status area; stats_blk/stats_blk_mapping
 * are derived pointers into the same allocation.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					 &bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}
850
851 static void
852 bnx2_free_mem(struct bnx2 *bp)
853 {
854         int i;
855         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
856
857         bnx2_free_tx_mem(bp);
858         bnx2_free_rx_mem(bp);
859
860         for (i = 0; i < bp->ctx_pages; i++) {
861                 if (bp->ctx_blk[i]) {
862                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
863                                           bp->ctx_blk[i],
864                                           bp->ctx_blk_mapping[i]);
865                         bp->ctx_blk[i] = NULL;
866                 }
867         }
868
869         if (bnapi->status_blk.msi)
870                 bnapi->status_blk.msi = NULL;
871 }
872
/* Wire up per-device memory: point each NAPI vector at its slice of
 * the shared status block, allocate the 5709 context pages, then the
 * rx and tx ring memory.  Returns 0 or -ENOMEM; on failure all
 * partial allocations are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* The remaining vectors use the MSI-X status blocks
		 * laid out at BNX2_SBLK_MSIX_ALIGN_SIZE strides after
		 * the base block.
		 */
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Interrupt number field, bits 31:24. */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 keeps 0x2000 bytes of context in host memory,
		 * split into BNX2_PAGE_SIZE pages (at least one).
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
929
/* Mirror the current link state into firmware shared memory
 * (BNX2_LINK_STATUS) so the bootcode sees it.  Skipped when a remote
 * PHY owns the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode the resolved speed/duplex for the firmware. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: the link status bit is
			 * latched, so the second read reflects the
			 * current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
988
989 static char *
990 bnx2_xceiver_str(struct bnx2 *bp)
991 {
992         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
993                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
994                  "Copper");
995 }
996
/* Log the link transition and update the carrier state.  The
 * pr_cont() calls extend the netdev_info() line with the negotiated
 * flow control details before the final newline.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	/* Keep the firmware's view of the link in sync. */
	bnx2_report_fw_link(bp);
}
1027
/* Resolve bp->flow_ctrl from the autoneg results, or from the
 * requested setting when speed/flow-control autoneg is not fully
 * enabled.  Pause resolution follows IEEE 802.3 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Autoneg not fully on: apply the forced setting, but only on
	 * a full duplex link (pause is undefined for half duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause state
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause bits onto the copper encodings so
	 * a single resolution table below covers both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1103
/* Record link-up state for a 5709 SerDes PHY and read the negotiated
 * speed/duplex from the GP_STATUS block into bp->line_speed and
 * bp->duplex.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select the GP_STATUS block, read the top-level AN status,
	 * then restore the default register block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1142
/* Record link-up state for a 5708 SerDes PHY and read the resolved
 * speed/duplex from the 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1171
/* Record link-up state for a 5706 SerDes PHY.  Speed is fixed at
 * 1 Gbps here; duplex comes from BMCR when forced, or from the
 * common 1000X advertisement bits when autoneg is enabled.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	/* Autoneg: resolve duplex from the abilities both ends
	 * advertised.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1208
/* Resolve speed/duplex for a copper PHY from the autoneg result (or
 * from BMCR when autoneg is off) and record the MDI-X status.
 * Clears bp->link_up if autoneg yields no common ability.  Always
 * returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first.  The link partner bits in
		 * MII_STAT1000 sit 2 bits above the corresponding
		 * MII_CTRL1000 advertisement bits, hence the shift.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* Fall back to 10/100 abilities. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg off: speed/duplex are forced via BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Latch the crossover (MDI-X) status for reporting. */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1284
/* Initialize the rx L2 context for the given connection ID: program
 * the BD chain type/size and, when tx pause is active, enable flow
 * control for this ring.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): field at bits 15:8 set to 0x02; meaning not
	 * documented here - value kept from vendor initialization.
	 */
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1299
1300 static void
1301 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1302 {
1303         int i;
1304         u32 cid;
1305
1306         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1307                 if (i == 1)
1308                         cid = RX_RSS_CID;
1309                 bnx2_init_rx_context(bp, cid);
1310         }
1311 }
1312
/* Program the EMAC for the current link state: tx lengths, port mode
 * (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Finishes by
 * acking the link-change interrupt and re-initializing the rx
 * contexts so per-ring flow control matches.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): TX_LENGTHS values (0x2620 default, 0x26ff for
	 * 1Gb half duplex) are vendor-provided magic; meaning not
	 * documented here.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode and
				 * falls through to plain MII.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}
1379
1380 static void
1381 bnx2_enable_bmsr1(struct bnx2 *bp)
1382 {
1383         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1384             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1385                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1386                                MII_BNX2_BLK_ADDR_GP_STATUS);
1387 }
1388
1389 static void
1390 bnx2_disable_bmsr1(struct bnx2 *bp)
1391 {
1392         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1393             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1394                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1395                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1396 }
1397
/* Ensure 2.5G is enabled in the Over-1G UP1 register (selecting the
 * OVER1G block on 5709).  Returns 1 if 2.5G was already enabled,
 * 0 if this call had to enable it or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* Keep the software advertisement mask in sync. */
	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1426
/* Ensure 2.5G is disabled in the Over-1G UP1 register (selecting the
 * OVER1G block on 5709).  Returns 1 if this call had to clear the
 * bit, 0 if it was already clear or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1452
/* Force the SerDes PHY to 2.5G.  On the 5709 this is programmed via
 * the SERDES_DIG MISC1 register; on the 5708 via a vendor BMCR bit.
 * No-op for non-2.5G-capable PHYs and other chips.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Set the forced-speed field to 2.5G in MISC1, then
		 * return to the default register block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed implies autoneg off; honor the requested
	 * duplex.
	 */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1496
/* Undo bnx2_enable_forced_2g5(): clear the forced-2.5G setting and,
 * when autoneg speed is configured, restart autonegotiation.  No-op
 * for non-2.5G-capable PHYs and other chips.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Clear the force bit in MISC1, then return to the
		 * default register block.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Re-enable and restart autoneg at 1G. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1535
/* Toggle the 5706 SerDes forced-link-down state through the
 * expansion SERDES_CTL register.  bnx2_set_link() calls this with
 * start == 0 to release a previously forced-down link; start != 0
 * presumably asserts it (bit semantics not documented here).
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Read-modify-write via the DSP expansion register port. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1548
/* Re-evaluate the PHY link state and propagate any change: updates
 * bp->link_up, resolves speed/duplex/flow control via the per-chip
 * linkup helpers, reprograms the MAC, and reports transitions.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback, the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a remote PHY, link management is done elsewhere. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice (the link bit is latched), selecting the
	 * 5709 GP_STATUS block where required.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes: override BMSR's link bit using EMAC
		 * status plus the AN debug shadow's NOSYNC indicator.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link dropped while in parallel detect: re-enable
		 * autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1632
1633 static int
1634 bnx2_reset_phy(struct bnx2 *bp)
1635 {
1636         int i;
1637         u32 reg;
1638
1639         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1640
1641 #define PHY_RESET_MAX_WAIT 100
1642         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1643                 udelay(10);
1644
1645                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1646                 if (!(reg & BMCR_RESET)) {
1647                         udelay(20);
1648                         break;
1649                 }
1650         }
1651         if (i == PHY_RESET_MAX_WAIT) {
1652                 return -EBUSY;
1653         }
1654         return 0;
1655 }
1656
1657 static u32
1658 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1659 {
1660         u32 adv = 0;
1661
1662         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1663                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1664
1665                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1666                         adv = ADVERTISE_1000XPAUSE;
1667                 }
1668                 else {
1669                         adv = ADVERTISE_PAUSE_CAP;
1670                 }
1671         }
1672         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1673                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1674                         adv = ADVERTISE_1000XPSE_ASYM;
1675                 }
1676                 else {
1677                         adv = ADVERTISE_PAUSE_ASYM;
1678                 }
1679         }
1680         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1681                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1682                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683                 }
1684                 else {
1685                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1686                 }
1687         }
1688         return adv;
1689 }
1690
1691 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1692
/* Hand link configuration to the remote PHY firmware: encode the
 * requested speed/duplex/pause settings into the DRV_MB_ARG0 shared
 * memory word and issue the SET_LINK firmware command.  Drops
 * bp->phy_lock around the firmware handshake (see sparse
 * annotations).  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled mode. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode the single requested speed. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Release phy_lock across the firmware command exchange. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1751
/* Configure the SerDes (fibre) PHY according to bp->autoneg,
 * bp->req_line_speed/req_duplex and bp->advertising.  Hands off to
 * bnx2_setup_remote_phy() when the bootcode manages the PHY.
 *
 * Called with bp->phy_lock held; the lock is dropped around the
 * msleep() used to make a forced link-down visible to the partner.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Forcing 2.5G is chip-specific. */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed; just re-resolve flow control
			 * and reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1868
/* Advertisement masks used when building bp->advertising, in ethtool
 * ADVERTISED_* encoding.  ETHTOOL_ALL_FIBRE_SPEED expands in terms of
 * a local variable 'bp' and includes 2.5G only on 2.5G-capable PHYs.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII ADVERTISE_* masks for the copper autoneg registers. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1883
/* Derive the driver's default link settings (bp->autoneg,
 * bp->advertising, bp->req_line_speed/req_duplex) from the link
 * configuration stored in shared memory for the current PHY port
 * (copper vs. serdes).
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate each SPEED_* bit into the
		 * corresponding ethtool ADVERTISED_* flag.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: the highest speed bit set wins; duplex
		 * defaults to full unless a half-duplex bit is set.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1930
/* Set the driver's default link settings: remote-PHY devices take
 * theirs from shared memory; otherwise default to full autoneg,
 * except for serdes ports whose hardware config forces a 1G default.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Honor a hardware-config default of forced 1G. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1956
/* Send a driver-alive pulse to the firmware: bump the pulse sequence
 * number and write it to the BNX2_DRV_PULSE_MB mailbox in shared
 * memory.  The write goes through the shared PCICFG register window,
 * so both register accesses are done under bp->indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1970
/* Handle a link event from the bootcode-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl and bp->phy_port, reports
 * any link-state change, and reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The firmware piggybacks heartbeat requests on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* HALF cases set duplex then fall through to the matching
		 * FULL case to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Use the forced flow-control setting unless both speed
		 * and flow control are autonegotiated, in which case take
		 * the firmware-reported result.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A port-type change (serdes <-> copper) re-derives the
		 * default link settings.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2051
2052 static int
2053 bnx2_set_remote_link(struct bnx2 *bp)
2054 {
2055         u32 evt_code;
2056
2057         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2058         switch (evt_code) {
2059                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2060                         bnx2_remote_phy_event(bp);
2061                         break;
2062                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2063                 default:
2064                         bnx2_send_heart_beat(bp);
2065                         break;
2066         }
2067         return 0;
2068 }
2069
/* Configure the copper PHY from bp->autoneg, bp->advertising and
 * bp->req_line_speed/req_duplex.  In autoneg mode, rewrites the
 * advertisement registers and restarts autoneg only when something
 * actually changed; in forced mode, clears the advertisement and
 * forces speed/duplex through BMCR, bouncing the link if it is up.
 *
 * Called with bp->phy_lock held; the lock is dropped around the
 * msleep() used while forcing the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* The BMSR link-status bit is latched per the MII spec;
		 * read twice to get the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2161
2162 static int
2163 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2164 __releases(&bp->phy_lock)
2165 __acquires(&bp->phy_lock)
2166 {
2167         if (bp->loopback == MAC_LOOPBACK)
2168                 return 0;
2169
2170         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2171                 return bnx2_setup_serdes_phy(bp, port);
2172         }
2173         else {
2174                 return bnx2_setup_copper_phy(bp);
2175         }
2176 }
2177
/* One-time init for the 5709 SerDes PHY.  The IEEE-standard MII
 * registers sit at an offset of 0x10 on this PHY, so the bp->mii_*
 * shadow offsets are redirected first, and GP_TOP_AN_STATUS1 is used
 * in place of BMSR for link status.  The AN MMD is then selected via
 * the AER block, and the SerDes digital, over-1G, BAM next-page and
 * CL73 BAM blocks are programmed.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Select fiber mode and turn off media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2227
/* One-time init for the 5708 SerDes PHY: select fiber mode with media
 * auto-detect, enable PLL early detection, advertise 2.5G when
 * capable, raise the TX amplitude on A0/B0/B1 silicon, and apply any
 * TX control value from the shared-memory hardware config on
 * backplane designs.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* TXCTL3 value from the port hardware config; only applied when
	 * the shared hardware config marks this as a backplane design.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2285
/* One-time init for the 5706 SerDes PHY.  Clears the parallel-detect
 * flag, applies a 5706-specific MISC_GP_HW_CTL0 setting, and programs
 * the PHY's extended packet length bits on or off depending on
 * whether jumbo frames (MTU > 1500) are configured.  The register
 * 0x18/0x1c accesses are undocumented shadow-register sequences.
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2323
/* One-time init for the integrated copper PHY: apply the CRC and
 * early-DAC workarounds when the corresponding phy_flags are set,
 * program the extended packet length bits for jumbo MTUs, and enable
 * ethernet@wirespeed (plus auto-MDIX on the 5709) via the auxiliary
 * control register.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Undocumented shadow-register sequence for the CRC
		 * workaround.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 in DSP expansion register 8. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2381
2382
/* Full PHY (re)initialization, called with bp->phy_lock held.
 * Resets the interrupt-mode flags, restores the default MII register
 * offsets (the 5709 SerDes init overrides them), enables link
 * attentions, reads the 32-bit PHY id, runs the chip-specific init
 * routine, and finally applies the current link settings through
 * bnx2_setup_phy() (which may drop and re-take the lock).  Remote
 * (bootcode-managed) PHYs skip the local-PHY probing and go straight
 * to setup.  Returns 0 or an error from the helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the PHY id from the two MII id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2428
2429 static int
2430 bnx2_set_mac_loopback(struct bnx2 *bp)
2431 {
2432         u32 mac_mode;
2433
2434         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2435         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2436         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2437         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2438         bp->link_up = 1;
2439         return 0;
2440 }
2441
2442 static int bnx2_test_link(struct bnx2 *);
2443
/* Put the PHY in internal loopback at 1000 Mbps full duplex for the
 * loopback self-test.  Polls bnx2_test_link() for up to ~1 second,
 * then clears all loopback/duplex/2.5G related bits in the EMAC mode
 * register and forces GMII mode.  Marks the link up.  Returns 0 on
 * success or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait for the loopback link to come up (10 x 100 ms). */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2473
/* Dump management CPU (MCP) state and selected shared-memory words to
 * the kernel log as a diagnostic aid.  The MCP state register
 * addresses differ between the 5709 and earlier chips.  The program
 * counter is read twice -- two samples of the same register --
 * presumably so the log shows whether the MCP is still advancing;
 * verify against firmware docs before changing.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2516
/* Post a driver->bootcode mailbox message and optionally wait for its ack.
 *
 * @msg_data: message code/data; a rolling sequence number is OR'ed in.
 * @ack:      when zero, fire-and-forget — return 0 immediately after
 *            writing the mailbox.
 * @silent:   suppress the error dump on ack timeout.
 *
 * Returns 0 on success, -EBUSY if the firmware never echoed the sequence
 * number within BNX2_FW_ACK_TIME_OUT_MS, or -EIO if it acked with a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number so the ack can be
	 * matched to this exact request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The bootcode echoes the sequence number in FW_MB. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are treated as success regardless of the ack. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	/* Acked, but with an error status from the firmware. */
	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2563
/* Initialize the 5709 context memory via the host page table.
 *
 * Kicks off the hardware context memory init, polls for it to finish,
 * then registers each pre-allocated host context page (ctx_blk[]) with
 * the chip, polling for each page-table write to complete.
 *
 * Returns 0 on success, -EBUSY if the hardware does not complete an
 * operation in time, or -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (as a power of two above 256 bytes). */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the context memory init is done. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages must already be DMA-allocated by the caller. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table, then request the table write.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* WRITE_REQ self-clears when the chip has latched the entry. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2611
/* Zero-initialize the on-chip context memory (pre-5709 chips).
 *
 * Walks all 96 virtual context IDs and clears each physical context
 * through the CTX_VIRT_ADDR/CTX_PAGE_TBL window.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		/* 5706 A0 maps some virtual CIDs to different physical
		 * CIDs: when bit 3 of the vcid is set, remap into the
		 * 0x60-based range (low 3 bits and high nibble preserved).
		 */
		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* NOTE(review): the addresses accumulate (i << PHY_CTX_SHIFT)
		 * each pass rather than being recomputed from a base; kept
		 * byte-identical to the long-standing upstream behavior.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2654
2655 static int
2656 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2657 {
2658         u16 *good_mbuf;
2659         u32 good_mbuf_cnt;
2660         u32 val;
2661
2662         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2663         if (good_mbuf == NULL)
2664                 return -ENOMEM;
2665
2666         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2667                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2668
2669         good_mbuf_cnt = 0;
2670
2671         /* Allocate a bunch of mbufs and save the good ones in an array. */
2672         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2673         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2674                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2675                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2676
2677                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2678
2679                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2680
2681                 /* The addresses with Bit 9 set are bad memory blocks. */
2682                 if (!(val & (1 << 9))) {
2683                         good_mbuf[good_mbuf_cnt] = (u16) val;
2684                         good_mbuf_cnt++;
2685                 }
2686
2687                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2688         }
2689
2690         /* Free the good ones back to the mbuf pool thus discarding
2691          * all the bad ones. */
2692         while (good_mbuf_cnt) {
2693                 good_mbuf_cnt--;
2694
2695                 val = good_mbuf[good_mbuf_cnt];
2696                 val = (val << 9) | val | 1;
2697
2698                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2699         }
2700         kfree(good_mbuf);
2701         return 0;
2702 }
2703
2704 static void
2705 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2706 {
2707         u32 val;
2708
2709         val = (mac_addr[0] << 8) | mac_addr[1];
2710
2711         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2712
2713         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2714                 (mac_addr[4] << 8) | mac_addr[5];
2715
2716         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2717 }
2718
/* Allocate and DMA-map a page for slot @index of the RX page ring.
 *
 * On success the page pointer and its unmap address are stored in the
 * software ring entry and the 64-bit DMA address is written into the
 * matching hardware buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or -EIO
 * if the DMA mapping fails (the page is freed before returning).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	/* Must check the mapping before handing the address to hardware. */
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* Split the DMA address across the hi/lo halves of the BD. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2743
2744 static void
2745 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2746 {
2747         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2748         struct page *page = rx_pg->page;
2749
2750         if (!page)
2751                 return;
2752
2753         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2754                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2755
2756         __free_page(page);
2757         rx_pg->page = NULL;
2758 }
2759
/* Allocate and DMA-map a data buffer for slot @index of the RX ring.
 *
 * The buffer is kmalloc'ed at bp->rx_buf_size; only the region starting
 * at the l2_fhdr (see get_l2_fhdr()) is mapped for device access, sized
 * bp->rx_buf_use_size.  On success the buffer descriptor is populated
 * and rx_prod_bseq is advanced.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO if the
 * DMA mapping fails (the buffer is freed before returning).
 */
static inline int
bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	u8 *data;
	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct bnx2_rx_bd *rxbd =
		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev,
				 get_l2_fhdr(data),
				 bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		kfree(data);
		return -EIO;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the DMA address across the hi/lo halves of the BD. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Byte sequence counter the chip uses to track producer progress. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2792
2793 static int
2794 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2795 {
2796         struct status_block *sblk = bnapi->status_blk.msi;
2797         u32 new_link_state, old_link_state;
2798         int is_set = 1;
2799
2800         new_link_state = sblk->status_attn_bits & event;
2801         old_link_state = sblk->status_attn_bits_ack & event;
2802         if (new_link_state != old_link_state) {
2803                 if (new_link_state)
2804                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2805                 else
2806                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2807         } else
2808                 is_set = 0;
2809
2810         return is_set;
2811 }
2812
/* Handle PHY-related attention events from the status block.
 *
 * Runs under phy_lock; reacts to link-state changes and to the timer
 * abort event used for remote-PHY link management.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2826
/* Read the hardware TX consumer index from the status block.
 *
 * The barriers prevent the compiler from caching status-block fields
 * that the hardware updates via DMA.  When the raw index lands on the
 * last entry of a ring page it is bumped by one — presumably because
 * that slot holds the next-page chain pointer rather than a real
 * descriptor; confirm against the ring setup code.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2840
/* Reclaim completed TX descriptors, up to @budget packets.
 *
 * Walks the TX ring from the software consumer index to the hardware
 * consumer index, unmapping the head buffer and every fragment, then
 * freeing the skb.  Updates BQL accounting and wakes the TX queue if
 * it was stopped and enough descriptors are now free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per bnx2_napi instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until the last BD of this GSO
			 * packet has completed; account for the skipped
			 * next-page slot when the frags wrap the ring.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		/* Unmap the linear part, then each page fragment. */
		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons in case more completions arrived. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	/* Byte-queue-limits accounting for this queue. */
	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit path. */
	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2935
/* Recycle @count consumed RX page-ring entries back to the producer side.
 *
 * Moves the page pointer and DMA mapping from each consumer slot to the
 * corresponding producer slot so no new allocation is needed.  When
 * @skb is non-NULL, its last attached frag page is first detached and
 * returned to the ring, and the skb is freed (error-path cleanup).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* If producer and consumer point at the same slot, the page
		 * and mapping are already in place.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2993
/* Recycle an RX data buffer from consumer slot @cons to producer slot @prod.
 *
 * Used when a packet is copied out (or dropped) and the original buffer
 * can be reposted to the hardware unchanged: the data pointer, DMA
 * mapping, and buffer-descriptor address are moved across and the
 * producer byte sequence is advanced.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header region (synced for the CPU in bnx2_rx_int())
	 * back to the device.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3023
/* Build an skb for a received packet from @data (and page frags if split).
 *
 * @len:      packet length excluding the trailing 4 bytes.
 * @hdr_len:  0 for a linear packet; otherwise the header portion held in
 *            @data, with the remainder spread across the page ring.
 * @ring_idx: producer index in the low 16 bits, consumer index in the
 *            high 16 bits.
 *
 * A replacement buffer is allocated for the consumed ring slot first;
 * if that fails the original buffer and any page frags are recycled and
 * NULL is returned.  On success the skb owns @data (via build_skb) and
 * any attached pages.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the producer slot before consuming @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* For a split packet, also recycle the page frags that
		 * belonged to it.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	/* build_skb() takes ownership of @data on success. */
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* len excludes the trailing 4 bytes; frag_size covers the
		 * non-header remainder including them.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last fragment would contain only (part of)
			 * the 4 trailing bytes: trim them off the skb and
			 * recycle the remaining pages instead.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				/* Frees the skb and reclaims its last frag. */
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3128
/* Read the hardware RX consumer index from the status block.
 *
 * The barriers prevent the compiler from caching status-block fields
 * that the hardware updates via DMA.  When the raw index lands on the
 * last entry of a ring page it is bumped by one — presumably because
 * that slot holds the next-page chain pointer rather than a real
 * descriptor; confirm against the ring setup code.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3142
/* Service completed RX descriptors, up to @budget packets (NAPI poll body).
 *
 * Walks the RX ring from the software consumer index to the hardware
 * consumer index from the status block.  Small frames (<= rx_copy_thresh)
 * are copied into a fresh skb and the original buffer is recycled;
 * larger frames go through bnx2_rx_skb(), which may attach page-ring
 * fragments.  Producer indices are written back to the chip at the end.
 *
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		/* The chip-written l2_fhdr precedes the packet data. */
		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Only the header region is synced here; a full unmap is
		 * done in bnx2_rx_skb() if the buffer is consumed.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Prefetch the next entry's header to hide memory latency. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split: the ip_xsum field evidently
			 * carries the header length in this mode.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Errored frame: recycle the buffer (and any page
			 * frags) without passing anything up.
			 */
			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* drop the trailing 4 bytes (CRC) */

		if (len <= bp->rx_copy_thresh) {
			/* Small packet: copy into a fresh skb and recycle
			 * the original buffer.  The 6 extra bytes keep the
			 * copy start aligned (see "aligned copy" below).
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless VLAN tagged (0x8100 is the
		 * raw value of ETH_P_8021Q).
		 */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only for TCP/UDP frames with
		 * no checksum error flags set.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3309
3310 /* MSI ISR - The only difference between this and the INTx ISR
3311  * is that the MSI interrupt is always serviced.
3312  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Ack the interrupt and mask further host-coalescing interrupts;
	 * NAPI processing re-enables them when polling completes.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3332
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Unlike bnx2_msi(), no ack/mask register write is issued here;
	 * presumably one-shot MSI mode leaves the interrupt disarmed until
	 * NAPI re-arms it — NOTE(review): confirm against chip docs.
	 */
	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3349
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* shared line, not our interrupt */

	/* Ack and mask further interrupts until NAPI completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if we actually scheduled; the
	 * poll routine uses it to detect new work.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3388
3389 static inline int
3390 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3391 {
3392         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3393         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3394
3395         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3396             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3397                 return 1;
3398         return 0;
3399 }
3400
3401 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3402                                  STATUS_ATTN_BITS_TIMER_ABORT)
3403
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	/* Any rx/tx ring work pending? */
	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	/* Offload (cnic) events the cnic handler has not yet consumed? */
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* Unacknowledged link-state / timer-abort attention events? */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3423
/* Workaround for lost MSI interrupts: if work is pending but the status
 * index has not moved since the previous idle check, assume the MSI was
 * missed — pulse the MSI enable bit and invoke the handler directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not enabled in hardware. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on to reset hw MSI state. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3445
3446 #ifdef BCM_CNIC
/* Hand the status block to the registered cnic (offload) driver, if any;
 * the handler returns a tag recording how far it has processed.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is RCU-protected so the cnic module can unregister
	 * safely while we may be polling.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
3461 #endif
3462
/* Service link-state / timer-abort attention events that the hardware has
 * raised but the driver has not yet acknowledged.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raised bits differing from acked bits => unhandled event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* flush the write */
	}
}
3482
3483 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3484                           int work_done, int budget)
3485 {
3486         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3487         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3488
3489         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3490                 bnx2_tx_int(bp, bnapi, 0);
3491
3492         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3493                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3494
3495         return work_done;
3496 }
3497
/* NAPI poll routine for per-vector MSI-X contexts (fast-path work only;
 * link and cnic events are handled by the base vector in bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* All caught up: re-arm this vector's interrupt,
			 * reporting the last status index processed.
			 */
			napi_complete(napi);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3524
/* Base NAPI poll routine (INTx / MSI / MSI-X vector 0): services link
 * attention, fast-path rings, and cnic events, then re-arms interrupts
 * when all work is done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single write re-arms interrupts. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: write twice — first with MASK_INT, then
			 * without, so the re-enable takes effect cleanly.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3573
3574 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3575  * from set_multicast.
3576  */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	/* phy_lock also serializes EMAC rx-mode / RPM sorter updates. */
	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the promiscuous and VLAN-keep
	 * bits cleared; both are re-derived below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Keep VLAN tags in rx frames only when hw tag stripping is off
	 * and the chip supports keeping them.
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash filter with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the filter: the low
		 * CRC byte selects the register (bits 7:5) and the bit
		 * within it (bits 4:0).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering: fall back to
	 * promiscuous reception.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sorter: disable, load the new mode, then enable. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3662
3663 static int
3664 check_fw_section(const struct firmware *fw,
3665                  const struct bnx2_fw_file_section *section,
3666                  u32 alignment, bool non_empty)
3667 {
3668         u32 offset = be32_to_cpu(section->offset);
3669         u32 len = be32_to_cpu(section->len);
3670
3671         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3672                 return -EINVAL;
3673         if ((non_empty && len == 0) || len > fw->size - offset ||
3674             len & (alignment - 1))
3675                 return -EINVAL;
3676         return 0;
3677 }
3678
3679 static int
3680 check_mips_fw_entry(const struct firmware *fw,
3681                     const struct bnx2_mips_fw_file_entry *entry)
3682 {
3683         if (check_fw_section(fw, &entry->text, 4, true) ||
3684             check_fw_section(fw, &entry->data, 4, false) ||
3685             check_fw_section(fw, &entry->rodata, 4, false))
3686                 return -EINVAL;
3687         return 0;
3688 }
3689
3690 static void bnx2_release_firmware(struct bnx2 *bp)
3691 {
3692         if (bp->rv2p_firmware) {
3693                 release_firmware(bp->mips_firmware);
3694                 release_firmware(bp->rv2p_firmware);
3695                 bp->rv2p_firmware = NULL;
3696         }
3697 }
3698
/* Fetch and validate the MIPS and RV2P firmware images for this chip.
 * On success both bp->mips_firmware and bp->rv2p_firmware are set; on
 * failure neither image is held (error paths release what was taken).
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Select firmware files by chip; the 5709 A0/A1 steppings need a
	 * different RV2P image from later 5709 revisions.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Validate every section header before the images are used. */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3758
3759 static int bnx2_request_firmware(struct bnx2 *bp)
3760 {
3761         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3762 }
3763
3764 static u32
3765 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3766 {
3767         switch (idx) {
3768         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3769                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3770                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3771                 break;
3772         }
3773         return rv2p_code;
3774 }
3775
/* Download one RV2P processor image (PROC1 or PROC2), apply the fixup
 * patches embedded in the firmware file, and reset the processor.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own instruction-write command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write instructions 64 bits (two big-endian words) at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Re-apply up to 8 fixups at word locations listed in the image;
	 * a zero location means "no fixup in this slot".
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3835
3836 static int
3837 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3838             const struct bnx2_mips_fw_file_entry *fw_entry)
3839 {
3840         u32 addr, len, file_offset;
3841         __be32 *data;
3842         u32 offset;
3843         u32 val;
3844
3845         /* Halt the CPU. */
3846         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3847         val |= cpu_reg->mode_value_halt;
3848         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3849         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3850
3851         /* Load the Text area. */
3852         addr = be32_to_cpu(fw_entry->text.addr);
3853         len = be32_to_cpu(fw_entry->text.len);
3854         file_offset = be32_to_cpu(fw_entry->text.offset);
3855         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3856
3857         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3858         if (len) {
3859                 int j;
3860
3861                 for (j = 0; j < (len / 4); j++, offset += 4)
3862                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863         }
3864
3865         /* Load the Data area. */
3866         addr = be32_to_cpu(fw_entry->data.addr);
3867         len = be32_to_cpu(fw_entry->data.len);
3868         file_offset = be32_to_cpu(fw_entry->data.offset);
3869         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3870
3871         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3872         if (len) {
3873                 int j;
3874
3875                 for (j = 0; j < (len / 4); j++, offset += 4)
3876                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877         }
3878
3879         /* Load the Read-Only area. */
3880         addr = be32_to_cpu(fw_entry->rodata.addr);
3881         len = be32_to_cpu(fw_entry->rodata.len);
3882         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3883         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3884
3885         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3886         if (len) {
3887                 int j;
3888
3889                 for (j = 0; j < (len / 4); j++, offset += 4)
3890                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3891         }
3892
3893         /* Clear the pre-fetch instruction. */
3894         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3895
3896         val = be32_to_cpu(fw_entry->start_addr);
3897         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3898
3899         /* Start the CPU. */
3900         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3901         val &= ~cpu_reg->mode_value_halt;
3902         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3903         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3904
3905         return 0;
3906 }
3907
/* Load firmware into all on-chip processors (RV2P plus the five MIPS
 * CPUs).  Returns 0 or the first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	/* NOTE: return values ignored — load_rv2p_fw() always returns 0. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3947
/* Configure the chip for Wake-on-LAN (or no-WOL) before suspend: drop
 * the link to 10/100 on copper, program magic/ACPI packet reception,
 * and notify firmware of the suspend mode.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		autoneg = bp->autoneg;
		advertising = bp->advertising;

		/* On copper, temporarily force autoneg to 10/100 for the
		 * low-power link; original settings are restored below.
		 */
		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Program the sorter: disable, set mode, enable. */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4042
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; any other state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WOL magic/ACPI packet configuration. */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* On 5706 A0/A1, only enter D3hot when WOL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4098
4099 static int
4100 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4101 {
4102         u32 val;
4103         int j;
4104
4105         /* Request access to the flash interface. */
4106         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4107         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4109                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4110                         break;
4111
4112                 udelay(5);
4113         }
4114
4115         if (j >= NVRAM_TIMEOUT_COUNT)
4116                 return -EBUSY;
4117
4118         return 0;
4119 }
4120
4121 static int
4122 bnx2_release_nvram_lock(struct bnx2 *bp)
4123 {
4124         int j;
4125         u32 val;
4126
4127         /* Relinquish nvram interface. */
4128         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4129
4130         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4131                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4132                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4133                         break;
4134
4135                 udelay(5);
4136         }
4137
4138         if (j >= NVRAM_TIMEOUT_COUNT)
4139                 return -EBUSY;
4140
4141         return 0;
4142 }
4143
4144
4145 static int
4146 bnx2_enable_nvram_write(struct bnx2 *bp)
4147 {
4148         u32 val;
4149
4150         val = BNX2_RD(bp, BNX2_MISC_CFG);
4151         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4152
4153         if (bp->flash_info->flags & BNX2_NV_WREN) {
4154                 int j;
4155
4156                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4157                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4158                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4159
4160                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4161                         udelay(5);
4162
4163                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4164                         if (val & BNX2_NVM_COMMAND_DONE)
4165                                 break;
4166                 }
4167
4168                 if (j >= NVRAM_TIMEOUT_COUNT)
4169                         return -EBUSY;
4170         }
4171         return 0;
4172 }
4173
4174 static void
4175 bnx2_disable_nvram_write(struct bnx2 *bp)
4176 {
4177         u32 val;
4178
4179         val = BNX2_RD(bp, BNX2_MISC_CFG);
4180         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4181 }
4182
4183
4184 static void
4185 bnx2_enable_nvram_access(struct bnx2 *bp)
4186 {
4187         u32 val;
4188
4189         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4190         /* Enable both bits, even on read. */
4191         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4192                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4193 }
4194
4195 static void
4196 bnx2_disable_nvram_access(struct bnx2 *bp)
4197 {
4198         u32 val;
4199
4200         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4201         /* Disable both bits, even after read. */
4202         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4203                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4204                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4205 }
4206
4207 static int
4208 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4209 {
4210         u32 cmd;
4211         int j;
4212
4213         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4214                 /* Buffered flash, no erase needed */
4215                 return 0;
4216
4217         /* Build an erase command */
4218         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4219               BNX2_NVM_COMMAND_DOIT;
4220
4221         /* Need to clear DONE bit separately. */
4222         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4223
4224         /* Address of the NVRAM to read from. */
4225         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4226
4227         /* Issue an erase command. */
4228         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4229
4230         /* Wait for completion. */
4231         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4232                 u32 val;
4233
4234                 udelay(5);
4235
4236                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4237                 if (val & BNX2_NVM_COMMAND_DONE)
4238                         break;
4239         }
4240
4241         if (j >= NVRAM_TIMEOUT_COUNT)
4242                 return -EBUSY;
4243
4244         return 0;
4245 }
4246
4247 static int
4248 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4249 {
4250         u32 cmd;
4251         int j;
4252
4253         /* Build the command word. */
4254         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4255
4256         /* Calculate an offset of a buffered flash, not needed for 5709. */
4257         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4258                 offset = ((offset / bp->flash_info->page_size) <<
4259                            bp->flash_info->page_bits) +
4260                           (offset % bp->flash_info->page_size);
4261         }
4262
4263         /* Need to clear DONE bit separately. */
4264         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4265
4266         /* Address of the NVRAM to read from. */
4267         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4268
4269         /* Issue a read command. */
4270         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4271
4272         /* Wait for completion. */
4273         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4274                 u32 val;
4275
4276                 udelay(5);
4277
4278                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4279                 if (val & BNX2_NVM_COMMAND_DONE) {
4280                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4281                         memcpy(ret_val, &v, 4);
4282                         break;
4283                 }
4284         }
4285         if (j >= NVRAM_TIMEOUT_COUNT)
4286                 return -EBUSY;
4287
4288         return 0;
4289 }
4290
4291
4292 static int
4293 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4294 {
4295         u32 cmd;
4296         __be32 val32;
4297         int j;
4298
4299         /* Build the command word. */
4300         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4301
4302         /* Calculate an offset of a buffered flash, not needed for 5709. */
4303         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4304                 offset = ((offset / bp->flash_info->page_size) <<
4305                           bp->flash_info->page_bits) +
4306                          (offset % bp->flash_info->page_size);
4307         }
4308
4309         /* Need to clear DONE bit separately. */
4310         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4311
4312         memcpy(&val32, val, 4);
4313
4314         /* Write the data. */
4315         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4316
4317         /* Address of the NVRAM to write to. */
4318         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4319
4320         /* Issue the write command. */
4321         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4322
4323         /* Wait for completion. */
4324         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4325                 udelay(5);
4326
4327                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4328                         break;
4329         }
4330         if (j >= NVRAM_TIMEOUT_COUNT)
4331                 return -EBUSY;
4332
4333         return 0;
4334 }
4335
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface if the strapping
 * has not yet been applied.  Also determines bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or a
 * negative errno if the NVRAM lock cannot be acquired.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* The 5709 has a single known flash spec; skip strap decoding. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Whichever loop ran, j == entry_count means no table entry
	 * matched the strapping: unsupported part.
	 */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size firmware reports in shared memory; fall back
	 * to the flash table's total_size when it reports none.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4418
/* Read buf_size bytes of NVRAM starting at byte offset 'offset' into
 * ret_buf.  Arbitrary (unaligned) offsets and lengths are handled by
 * issuing aligned dword reads and copying out only the bytes asked
 * for.  Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Leading partial dword: round the offset down, read the whole
	 * dword, and copy out only the requested trailing bytes.
	 */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Round the remaining length up to a dword multiple; 'extra'
	 * counts the trailing pad bytes that must not be copied out.
	 */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* Single remaining dword; FIRST too if nothing has been
		 * read yet in this transaction.
		 */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Last dword goes through a bounce buffer so the pad
		 * bytes ('extra') never reach the caller's buffer.
		 */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
4528
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 * Unaligned head/tail bytes are preserved via read-modify-write into a
 * temporary aligned buffer; non-buffered flash is handled a page at a
 * time (read page, erase, write back).  Returns 0 or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword that will only be partially
	 * overwritten so its leading bytes can be kept.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Same for a partially-overwritten trailing dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge the preserved head/tail bytes and the caller's data
	 * into one dword-aligned scratch buffer.
	 */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered parts need a page-sized bounce buffer for the
	 * read-erase-write cycle (264 bytes covers the largest page).
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4708
4709 static void
4710 bnx2_init_fw_cap(struct bnx2 *bp)
4711 {
4712         u32 val, sig = 0;
4713
4714         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4715         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4716
4717         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4718                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4719
4720         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4721         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4722                 return;
4723
4724         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4725                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4726                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4727         }
4728
4729         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4730             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4731                 u32 link;
4732
4733                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4734
4735                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4736                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4737                         bp->phy_port = PORT_FIBRE;
4738                 else
4739                         bp->phy_port = PORT_TP;
4740
4741                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4742                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4743         }
4744
4745         if (netif_running(bp->dev) && sig)
4746                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4747 }
4748
4749 static void
4750 bnx2_setup_msix_tbl(struct bnx2 *bp)
4751 {
4752         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4753
4754         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4755         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4756 }
4757
/* Perform a coordinated soft reset of the chip: quiesce DMA, handshake
 * with the bootcode (reset_code selects the reason), issue the core
 * reset, then wait for firmware re-initialization and restore per-reset
 * state (fw caps, MSI-X table, 5706 A0 workarounds).
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
		udelay(5);
	} else {  /* 5709 */
		/* Disable new DMA and poll until no transactions pend. */
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);

		for (i = 0; i < 100; i++) {
			msleep(1);
			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
				break;
		}
	}

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets through MISC_COMMAND; the read flushes the
		 * posted write before the settling delay.
		 */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; a remote-PHY media change
	 * requires reprogramming the default link settings.
	 */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4882
4883 static int
4884 bnx2_init_chip(struct bnx2 *bp)
4885 {
4886         u32 val, mtu;
4887         int rc, i;
4888
4889         /* Make sure the interrupt is not active. */
4890         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4891
4892         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4893               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4894 #ifdef __BIG_ENDIAN
4895               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4896 #endif
4897               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4898               DMA_READ_CHANS << 12 |
4899               DMA_WRITE_CHANS << 16;
4900
4901         val |= (0x2 << 20) | (1 << 11);
4902
4903         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4904                 val |= (1 << 23);
4905
4906         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4907             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4908             !(bp->flags & BNX2_FLAG_PCIX))
4909                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4910
4911         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4912
4913         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4914                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4915                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4916                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4917         }
4918
4919         if (bp->flags & BNX2_FLAG_PCIX) {
4920                 u16 val16;
4921
4922                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4923                                      &val16);
4924                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4925                                       val16 & ~PCI_X_CMD_ERO);
4926         }
4927
4928         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4929                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4930                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4931                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4932
4933         /* Initialize context mapping and zero out the quick contexts.  The
4934          * context block must have already been enabled. */
4935         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4936                 rc = bnx2_init_5709_context(bp);
4937                 if (rc)
4938                         return rc;
4939         } else
4940                 bnx2_init_context(bp);
4941
4942         if ((rc = bnx2_init_cpus(bp)) != 0)
4943                 return rc;
4944
4945         bnx2_init_nvram(bp);
4946
4947         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4948
4949         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4950         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4951         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4952         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4953                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4954                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4955                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4956         }
4957
4958         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4959
4960         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4961         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4962         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4963
4964         val = (BNX2_PAGE_BITS - 8) << 24;
4965         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4966
4967         /* Configure page size. */
4968         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4969         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4970         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4971         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4972
4973         val = bp->mac_addr[0] +
4974               (bp->mac_addr[1] << 8) +
4975               (bp->mac_addr[2] << 16) +
4976               bp->mac_addr[3] +
4977               (bp->mac_addr[4] << 8) +
4978               (bp->mac_addr[5] << 16);
4979         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4980
4981         /* Program the MTU.  Also include 4 bytes for CRC32. */
4982         mtu = bp->dev->mtu;
4983         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4984         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4985                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4986         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4987
4988         if (mtu < 1500)
4989                 mtu = 1500;
4990
4991         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4992         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4993         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4994
4995         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4996         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4997                 bp->bnx2_napi[i].last_status_idx = 0;
4998
4999         bp->idle_chk_status_idx = 0xffff;
5000
5001         /* Set up how to generate a link change interrupt. */
5002         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5003
5004         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5005                 (u64) bp->status_blk_mapping & 0xffffffff);
5006         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5007
5008         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5009                 (u64) bp->stats_blk_mapping & 0xffffffff);
5010         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5011                 (u64) bp->stats_blk_mapping >> 32);
5012
5013         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5014                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5015
5016         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5017                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5018
5019         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5020                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5021
5022         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5023
5024         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5025
5026         BNX2_WR(bp, BNX2_HC_COM_TICKS,
5027                 (bp->com_ticks_int << 16) | bp->com_ticks);
5028
5029         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5030                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5031
5032         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5033                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5034         else
5035                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5036         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5037
5038         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5039                 val = BNX2_HC_CONFIG_COLLECT_STATS;
5040         else {
5041                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5042                       BNX2_HC_CONFIG_COLLECT_STATS;
5043         }
5044
5045         if (bp->flags & BNX2_FLAG_USING_MSIX) {
5046                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5047                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
5048
5049                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5050         }
5051
5052         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5053                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5054
5055         BNX2_WR(bp, BNX2_HC_CONFIG, val);
5056
5057         if (bp->rx_ticks < 25)
5058                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5059         else
5060                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5061
5062         for (i = 1; i < bp->irq_nvecs; i++) {
5063                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5064                            BNX2_HC_SB_CONFIG_1;
5065
5066                 BNX2_WR(bp, base,
5067                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5068                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5069                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5070
5071                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5072                         (bp->tx_quick_cons_trip_int << 16) |
5073                          bp->tx_quick_cons_trip);
5074
5075                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5076                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5077
5078                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5079                         (bp->rx_quick_cons_trip_int << 16) |
5080                         bp->rx_quick_cons_trip);
5081
5082                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5083                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5084         }
5085
5086         /* Clear internal stats counters. */
5087         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5088
5089         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5090
5091         /* Initialize the receive filter. */
5092         bnx2_set_rx_mode(bp->dev);
5093
5094         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5095                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5096                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5097                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5098         }
5099         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5100                           1, 0);
5101
5102         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5103         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5104
5105         udelay(20);
5106
5107         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5108
5109         return rc;
5110 }
5111
5112 static void
5113 bnx2_clear_ring_states(struct bnx2 *bp)
5114 {
5115         struct bnx2_napi *bnapi;
5116         struct bnx2_tx_ring_info *txr;
5117         struct bnx2_rx_ring_info *rxr;
5118         int i;
5119
5120         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5121                 bnapi = &bp->bnx2_napi[i];
5122                 txr = &bnapi->tx_ring;
5123                 rxr = &bnapi->rx_ring;
5124
5125                 txr->tx_cons = 0;
5126                 txr->hw_tx_cons = 0;
5127                 rxr->rx_prod_bseq = 0;
5128                 rxr->rx_prod = 0;
5129                 rxr->rx_cons = 0;
5130                 rxr->rx_pg_prod = 0;
5131                 rxr->rx_pg_cons = 0;
5132         }
5133 }
5134
5135 static void
5136 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5137 {
5138         u32 val, offset0, offset1, offset2, offset3;
5139         u32 cid_addr = GET_CID_ADDR(cid);
5140
5141         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5142                 offset0 = BNX2_L2CTX_TYPE_XI;
5143                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5144                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5145                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5146         } else {
5147                 offset0 = BNX2_L2CTX_TYPE;
5148                 offset1 = BNX2_L2CTX_CMD_TYPE;
5149                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5150                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5151         }
5152         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5153         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5154
5155         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5156         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5157
5158         val = (u64) txr->tx_desc_mapping >> 32;
5159         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5160
5161         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5162         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5163 }
5164
5165 static void
5166 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5167 {
5168         struct bnx2_tx_bd *txbd;
5169         u32 cid = TX_CID;
5170         struct bnx2_napi *bnapi;
5171         struct bnx2_tx_ring_info *txr;
5172
5173         bnapi = &bp->bnx2_napi[ring_num];
5174         txr = &bnapi->tx_ring;
5175
5176         if (ring_num == 0)
5177                 cid = TX_CID;
5178         else
5179                 cid = TX_TSS_CID + ring_num - 1;
5180
5181         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5182
5183         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5184
5185         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5186         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5187
5188         txr->tx_prod = 0;
5189         txr->tx_prod_bseq = 0;
5190
5191         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5192         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5193
5194         bnx2_init_tx_context(bp, cid, txr);
5195 }
5196
5197 static void
5198 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5199                      u32 buf_size, int num_rings)
5200 {
5201         int i;
5202         struct bnx2_rx_bd *rxbd;
5203
5204         for (i = 0; i < num_rings; i++) {
5205                 int j;
5206
5207                 rxbd = &rx_ring[i][0];
5208                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5209                         rxbd->rx_bd_len = buf_size;
5210                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5211                 }
5212                 if (i == (num_rings - 1))
5213                         j = 0;
5214                 else
5215                         j = i + 1;
5216                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5217                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5218         }
5219 }
5220
/* Initialize one rx ring: build its BD pages, program its L2 context,
 * optionally set up the jumbo page ring, pre-fill the ring with rx
 * buffers, and publish the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base rx CID; additional (RSS) rings get their
	 * own CIDs.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the normal rx BD pages together. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default to no page ring; overwritten below for jumbo MTUs. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the secondary page-sized BD ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* 64-bit DMA address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* 64-bit DMA address of the first normal-ring BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning rather than treated as fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal rx ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path to publish new buffers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip about the initial producer positions. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5306
/* Initialize every tx and rx ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are (re)built. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are (re)built. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table, 8 entries (4 bits each)
		 * per 32-bit word, spreading entries round-robin over the
		 * non-default rx rings; flush each completed word to the
		 * chip.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5353
5354 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5355 {
5356         u32 max, num_rings = 1;
5357
5358         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5359                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5360                 num_rings++;
5361         }
5362         /* round to next power of 2 */
5363         max = max_size;
5364         while ((max & num_rings) == 0)
5365                 max >>= 1;
5366
5367         if (num_rings != max)
5368                 max <<= 1;
5369
5370         return max;
5371 }
5372
/* Compute all rx buffer/ring sizing parameters for the current MTU and
 * the requested ring size @size, including the jumbo page-ring setup
 * when a single buffer cannot hold a full frame.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total allocation size per buffer including alignment padding
	 * and the skb_shared_info needed by build_skb().
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* If a buffer would exceed one page (and the chip's jumbo path
	 * works), switch to small header buffers plus a page ring for
	 * the frame data.
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* The first buffer now only needs to hold the headers. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5413
/* Unmap and free every skb still held by the tx rings.  Walks the BD
 * chain the same way the tx completion path does: head BD (mapped with
 * dma_unmap_single) followed by nr_frags fragment BDs (mapped with
 * dma_unmap_page).
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring was never allocated. */
		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the loop body, following the BD chain
		 * of each skb; empty slots are simply skipped.
		 */
		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Unmap each fragment BD belonging to this skb. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Keep BQL accounting consistent after dropping the skbs. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5458
5459 static void
5460 bnx2_free_rx_skbs(struct bnx2 *bp)
5461 {
5462         int i;
5463
5464         for (i = 0; i < bp->num_rx_rings; i++) {
5465                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5466                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5467                 int j;
5468
5469                 if (rxr->rx_buf_ring == NULL)
5470                         return;
5471
5472                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5473                         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5474                         u8 *data = rx_buf->data;
5475
5476                         if (data == NULL)
5477                                 continue;
5478
5479                         dma_unmap_single(&bp->pdev->dev,
5480                                          dma_unmap_addr(rx_buf, mapping),
5481                                          bp->rx_buf_use_size,
5482                                          PCI_DMA_FROMDEVICE);
5483
5484                         rx_buf->data = NULL;
5485
5486                         kfree(data);
5487                 }
5488                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5489                         bnx2_free_rx_page(bp, rxr, j);
5490         }
5491 }
5492
/* Free all driver-owned tx and rx buffers; tx first, then rx. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5499
5500 static int
5501 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5502 {
5503         int rc;
5504
5505         rc = bnx2_reset_chip(bp, reset_code);
5506         bnx2_free_skbs(bp);
5507         if (rc)
5508                 return rc;
5509
5510         if ((rc = bnx2_init_chip(bp)) != 0)
5511                 return rc;
5512
5513         bnx2_init_all_rings(bp);
5514         return 0;
5515 }
5516
5517 static int
5518 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5519 {
5520         int rc;
5521
5522         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5523                 return rc;
5524
5525         spin_lock_bh(&bp->phy_lock);
5526         bnx2_init_phy(bp, reset_phy);
5527         bnx2_set_link(bp);
5528         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5529                 bnx2_remote_phy_event(bp);
5530         spin_unlock_bh(&bp->phy_lock);
5531         return 0;
5532 }
5533
5534 static int
5535 bnx2_shutdown_chip(struct bnx2 *bp)
5536 {
5537         u32 reset_code;
5538
5539         if (bp->flags & BNX2_FLAG_NO_WOL)
5540                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5541         else if (bp->wol)
5542                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5543         else
5544                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5545
5546         return bnx2_reset_chip(bp, reset_code);
5547 }
5548
/* Self-test for chip registers (ethtool register test).  For each table
 * entry, write 0 and then all-ones to the register and verify that the
 * read/write bits (rw_mask) take the written value while the read-only
 * bits (ro_mask) keep their original value.  The original register
 * value is restored in all cases.  Returns 0 on success, -ENODEV on the
 * first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; flags: BNX2_FL_NOT_5709 marks entries
	 * skipped on the 5709; rw_mask: bits expected to be writable;
	 * ro_mask: bits expected to be read-only.  Terminated by the
	 * 0xffff offset sentinel.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Some registers only exist on pre-5709 chips. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: all rw bits must read back 0, ro bits must be
		 * unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: all rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Restore the original value and move on. */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5719
5720 static int
5721 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5722 {
5723         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5724                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5725         int i;
5726
5727         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5728                 u32 offset;
5729
5730                 for (offset = 0; offset < size; offset += 4) {
5731
5732                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5733
5734                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5735                                 test_pattern[i]) {
5736                                 return -ENODEV;
5737                         }
5738                 }
5739         }
5740         return 0;
5741 }
5742
5743 static int
5744 bnx2_test_memory(struct bnx2 *bp)
5745 {
5746         int ret = 0;
5747         int i;
5748         static struct mem_entry {
5749                 u32   offset;
5750                 u32   len;
5751         } mem_tbl_5706[] = {
5752                 { 0x60000,  0x4000 },
5753                 { 0xa0000,  0x3000 },
5754                 { 0xe0000,  0x4000 },
5755                 { 0x120000, 0x4000 },
5756                 { 0x1a0000, 0x4000 },
5757                 { 0x160000, 0x4000 },
5758                 { 0xffffffff, 0    },
5759         },
5760         mem_tbl_5709[] = {
5761                 { 0x60000,  0x4000 },
5762                 { 0xa0000,  0x3000 },
5763                 { 0xe0000,  0x4000 },
5764                 { 0x120000, 0x4000 },
5765                 { 0x1a0000, 0x4000 },
5766                 { 0xffffffff, 0    },
5767         };
5768         struct mem_entry *mem_tbl;
5769
5770         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5771                 mem_tbl = mem_tbl_5709;
5772         else
5773                 mem_tbl = mem_tbl_5706;
5774
5775         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5776                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5777                         mem_tbl[i].len)) != 0) {
5778                         return ret;
5779                 }
5780         }
5781
5782         return ret;
5783 }
5784
5785 #define BNX2_MAC_LOOPBACK       0
5786 #define BNX2_PHY_LOOPBACK       1
5787
5788 static int
5789 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5790 {
5791         unsigned int pkt_size, num_pkts, i;
5792         struct sk_buff *skb;
5793         u8 *data;
5794         unsigned char *packet;
5795         u16 rx_start_idx, rx_idx;
5796         dma_addr_t map;
5797         struct bnx2_tx_bd *txbd;
5798         struct bnx2_sw_bd *rx_buf;
5799         struct l2_fhdr *rx_hdr;
5800         int ret = -ENODEV;
5801         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5802         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5803         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5804
5805         tx_napi = bnapi;
5806
5807         txr = &tx_napi->tx_ring;
5808         rxr = &bnapi->rx_ring;
5809         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5810                 bp->loopback = MAC_LOOPBACK;
5811                 bnx2_set_mac_loopback(bp);
5812         }
5813         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5814                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5815                         return 0;
5816
5817                 bp->loopback = PHY_LOOPBACK;
5818                 bnx2_set_phy_loopback(bp);
5819         }
5820         else
5821                 return -EINVAL;
5822
5823         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5824         skb = netdev_alloc_skb(bp->dev, pkt_size);
5825         if (!skb)
5826                 return -ENOMEM;
5827         packet = skb_put(skb, pkt_size);
5828         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5829         memset(packet + ETH_ALEN, 0x0, 8);
5830         for (i = 14; i < pkt_size; i++)
5831                 packet[i] = (unsigned char) (i & 0xff);
5832
5833         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5834                              PCI_DMA_TODEVICE);
5835         if (dma_mapping_error(&bp->pdev->dev, map)) {
5836                 dev_kfree_skb(skb);
5837                 return -EIO;
5838         }
5839
5840         BNX2_WR(bp, BNX2_HC_COMMAND,
5841                 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5842
5843         BNX2_RD(bp, BNX2_HC_COMMAND);
5844
5845         udelay(5);
5846         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5847
5848         num_pkts = 0;
5849
5850         txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5851
5852         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5853         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5854         txbd->tx_bd_mss_nbytes = pkt_size;
5855         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5856
5857         num_pkts++;
5858         txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5859         txr->tx_prod_bseq += pkt_size;
5860
5861         BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5862         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5863
5864         udelay(100);
5865
5866         BNX2_WR(bp, BNX2_HC_COMMAND,
5867                 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5868
5869         BNX2_RD(bp, BNX2_HC_COMMAND);
5870
5871         udelay(5);
5872
5873         dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5874         dev_kfree_skb(skb);
5875
5876         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5877                 goto loopback_test_done;
5878
5879         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5880         if (rx_idx != rx_start_idx + num_pkts) {
5881                 goto loopback_test_done;
5882         }
5883
5884         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5885         data = rx_buf->data;
5886
5887         rx_hdr = get_l2_fhdr(data);
5888         data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5889
5890         dma_sync_single_for_cpu(&bp->pdev->dev,
5891                 dma_unmap_addr(rx_buf, mapping),
5892                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5893
5894         if (rx_hdr->l2_fhdr_status &
5895                 (L2_FHDR_ERRORS_BAD_CRC |
5896                 L2_FHDR_ERRORS_PHY_DECODE |
5897                 L2_FHDR_ERRORS_ALIGNMENT |
5898                 L2_FHDR_ERRORS_TOO_SHORT |
5899                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5900
5901                 goto loopback_test_done;
5902         }
5903
5904         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5905                 goto loopback_test_done;
5906         }
5907
5908         for (i = 14; i < pkt_size; i++) {
5909                 if (*(data + i) != (unsigned char) (i & 0xff)) {
5910                         goto loopback_test_done;
5911                 }
5912         }
5913
5914         ret = 0;
5915
5916 loopback_test_done:
5917         bp->loopback = 0;
5918         return ret;
5919 }
5920
5921 #define BNX2_MAC_LOOPBACK_FAILED        1
5922 #define BNX2_PHY_LOOPBACK_FAILED        2
5923 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5924                                          BNX2_PHY_LOOPBACK_FAILED)
5925
5926 static int
5927 bnx2_test_loopback(struct bnx2 *bp)
5928 {
5929         int rc = 0;
5930
5931         if (!netif_running(bp->dev))
5932                 return BNX2_LOOPBACK_FAILED;
5933
5934         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5935         spin_lock_bh(&bp->phy_lock);
5936         bnx2_init_phy(bp, 1);
5937         spin_unlock_bh(&bp->phy_lock);
5938         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5939                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5940         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5941                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5942         return rc;
5943 }
5944
5945 #define NVRAM_SIZE 0x200
5946 #define CRC32_RESIDUAL 0xdebb20e3
5947
5948 static int
5949 bnx2_test_nvram(struct bnx2 *bp)
5950 {
5951         __be32 buf[NVRAM_SIZE / 4];
5952         u8 *data = (u8 *) buf;
5953         int rc = 0;
5954         u32 magic, csum;
5955
5956         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5957                 goto test_nvram_done;
5958
5959         magic = be32_to_cpu(buf[0]);
5960         if (magic != 0x669955aa) {
5961                 rc = -ENODEV;
5962                 goto test_nvram_done;
5963         }
5964
5965         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5966                 goto test_nvram_done;
5967
5968         csum = ether_crc_le(0x100, data);
5969         if (csum != CRC32_RESIDUAL) {
5970                 rc = -ENODEV;
5971                 goto test_nvram_done;
5972         }
5973
5974         csum = ether_crc_le(0x100, data + 0x100);
5975         if (csum != CRC32_RESIDUAL) {
5976                 rc = -ENODEV;
5977         }
5978
5979 test_nvram_done:
5980         return rc;
5981 }
5982
5983 static int
5984 bnx2_test_link(struct bnx2 *bp)
5985 {
5986         u32 bmsr;
5987
5988         if (!netif_running(bp->dev))
5989                 return -ENODEV;
5990
5991         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5992                 if (bp->link_up)
5993                         return 0;
5994                 return -ENODEV;
5995         }
5996         spin_lock_bh(&bp->phy_lock);
5997         bnx2_enable_bmsr1(bp);
5998         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5999         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6000         bnx2_disable_bmsr1(bp);
6001         spin_unlock_bh(&bp->phy_lock);
6002
6003         if (bmsr & BMSR_LSTATUS) {
6004                 return 0;
6005         }
6006         return -ENODEV;
6007 }
6008
6009 static int
6010 bnx2_test_intr(struct bnx2 *bp)
6011 {
6012         int i;
6013         u16 status_idx;
6014
6015         if (!netif_running(bp->dev))
6016                 return -ENODEV;
6017
6018         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6019
6020         /* This register is not touched during run-time. */
6021         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6022         BNX2_RD(bp, BNX2_HC_COMMAND);
6023
6024         for (i = 0; i < 10; i++) {
6025                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6026                         status_idx) {
6027
6028                         break;
6029                 }
6030
6031                 msleep_interruptible(10);
6032         }
6033         if (i < 10)
6034                 return 0;
6035
6036         return -ENODEV;
6037 }
6038
/* Determining link for parallel detection.
 *
 * Probe the 5706S SerDes PHY through its shadow and expansion registers
 * to decide whether a non-autonegotiating link partner is present.
 * Returns 1 if a parallel-detect link appears up, 0 otherwise.
 * Caller must hold bp->phy_lock (PHY accesses are serialized there).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled for this board configuration. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* No signal detected on the wire -> no link. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read AN_DBG twice -- presumably latched bits, so the second
	 * read returns current state.  NOTE(review): confirm vs PHY docs.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	/* Lost sync or invalid RUDI -> no usable link. */
	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6070
/* Periodic link maintenance for the 5706 SerDes PHY, called from
 * bnx2_timer().  Implements parallel detection: if autoneg is on but no
 * link comes up, force 1G full duplex when a parallel-detect link is
 * seen; once the partner appears to autonegotiate again, re-enable
 * autoneg.  Also forces down a link that has lost sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still settling; skip the link
		 * check until the countdown expires.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				/* Partner is not autonegotiating: force
				 * 1000 Mbps full duplex and remember that
				 * we are in parallel-detect mode.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Bit 0x20 of shadow register 0x15 (selected via 0x17)
		 * appears to indicate the partner resumed autoneg --
		 * NOTE(review): magic register/bit, confirm vs PHY docs.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner autonegotiates again; follow suit. */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG -- second read presumably returns
		 * the current (non-latched) state.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reports up but the PHY lost sync: force it
			 * down once, then let bnx2_set_link() re-evaluate
			 * on the next tick.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6132
/* Periodic link maintenance for the 5708 SerDes PHY, called from
 * bnx2_timer().  While the link is down with autoneg requested,
 * alternate between forced 2.5G and autoneg until a link comes up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* A remote PHY manages its own link. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	/* Without 2.5G capability there is nothing to alternate. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg did not bring the link up; try forced
			 * 2.5G with a shorter re-check interval.
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode failed too; return to autoneg and
			 * give it two timer ticks before re-checking.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6165
/* Periodic driver timer (bp->timer).  Sends the management firmware
 * heartbeat, refreshes the firmware RX-drop counter, applies the broken
 * statistics workaround, runs the SerDes link state machines, and
 * re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are gated (e.g. a reset is in progress); just
	 * re-arm and try again next tick.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot MSI) needs the missed-interrupt
	 * check.
	 */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Mirror the firmware's RX drop count into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6201
6202 static int
6203 bnx2_request_irq(struct bnx2 *bp)
6204 {
6205         unsigned long flags;
6206         struct bnx2_irq *irq;
6207         int rc = 0, i;
6208
6209         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6210                 flags = 0;
6211         else
6212                 flags = IRQF_SHARED;
6213
6214         for (i = 0; i < bp->irq_nvecs; i++) {
6215                 irq = &bp->irq_tbl[i];
6216                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6217                                  &bp->bnx2_napi[i]);
6218                 if (rc)
6219                         break;
6220                 irq->requested = 1;
6221         }
6222         return rc;
6223 }
6224
6225 static void
6226 __bnx2_free_irq(struct bnx2 *bp)
6227 {
6228         struct bnx2_irq *irq;
6229         int i;
6230
6231         for (i = 0; i < bp->irq_nvecs; i++) {
6232                 irq = &bp->irq_tbl[i];
6233                 if (irq->requested)
6234                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6235                 irq->requested = 0;
6236         }
6237 }
6238
6239 static void
6240 bnx2_free_irq(struct bnx2 *bp)
6241 {
6242
6243         __bnx2_free_irq(bp);
6244         if (bp->flags & BNX2_FLAG_USING_MSI)
6245                 pci_disable_msi(bp->pdev);
6246         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6247                 pci_disable_msix(bp->pdev);
6248
6249         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6250 }
6251
/* Try to switch the device to MSI-X with up to msix_vecs vectors (plus
 * one extra for the CNIC offload driver when built in).  On success
 * this sets bp->irq_nvecs, the USING_MSIX | ONE_SHOT_MSI flags and
 * fills bp->irq_tbl; on failure it returns with the caller's INTx
 * setup untouched.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the MSI-X table/PBA locations behind GRC windows. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* Ask for one extra vector for CNIC. */
	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	/* irq_nvecs counts only the net-driver vectors; the CNIC vector
	 * (if granted) is excluded here but its irq_tbl entry is still
	 * initialized by the loop below.
	 */
	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6295
/* Choose the interrupt mode (MSI-X, MSI or INTx) and the TX/RX ring
 * counts.  Starts from a safe single-vector INTx default, then upgrades
 * to MSI-X or MSI when supported and dis_msi is not set.  Returns the
 * result of netif_set_real_num_rx_queues().
 */
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Size the MSI-X request from the user-requested ring counts,
	 * falling back to CPU-based defaults when none were requested.
	 */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default: a single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI if MSI-X did not stick. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			/* Not redundant: pci_enable_msi() updates
			 * pdev->irq, so it must be re-read here.
			 */
			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Default TX ring count is rounded down to a power of two --
	 * NOTE(review): presumably required by the ring-select logic.
	 */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6347
/* Called with rtnl_lock.
 *
 * ndo_open: bring the interface up.  Loads firmware, selects the
 * interrupt mode, allocates rings and IRQs, and initializes the NIC.
 * When MSI is in use, a test interrupt is generated; if none arrives
 * the driver falls back to INTx and reinitializes.  Any failure
 * unwinds through the open_err path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx (dis_msi = 1) and reinitialize. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				/* Timer was armed above; stop it before
				 * unwinding.
				 */
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above -- NOTE(review): assumes the
	 * teardown helpers tolerate partially initialized state.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6430
/* Deferred reset handler (bp->reset_task), scheduled from
 * bnx2_tx_timeout().  Stops traffic, restores PCI config if the PCI
 * block was reset, reinitializes the NIC and restarts traffic.  Closes
 * the device if reinitialization fails.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	/* Device may have been closed between scheduling and running. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	/* A cleared memory-enable bit indicates the PCI block reset
	 * underneath us; restore the saved config space.
	 */
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		/* Re-enable NAPI (disabled by bnx2_netif_stop) before
		 * handing off to dev_close().
		 */
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6465
/* Expands to { "<ftq>FTQ_CTL", BNX2_<ftq>FTQ_CTL } for the table below. */
#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }

/* Dump the flow-through queue control registers, the on-chip CPU
 * states, and the TX BD cache (TBDC) to the kernel log.  Diagnostic
 * only; called from bnx2_tx_timeout().
 */
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* TXP_ listed twice -- NOTE(review): presumably a second
		 * sample of the same register; confirm intent upstream.
		 */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	/* One CPU block every 0x40000 from TXP to CP.  The pc register
	 * (offset 0x1c) is printed twice -- presumably so a changing
	 * value shows the CPU is still executing; confirm upstream.
	 */
	netdev_err(dev, "CPU states:\n");
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Issue a CAM read for line i and spin (bounded at 100
		 * iterations) waiting for the command to complete.
		 */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6534
/* Dump interrupt, PCI power-management and MAC status registers to the
 * kernel log.  Diagnostic only; called from bnx2_tx_timeout().
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* With MSI-X the pending-bit array sits behind GRC window 3
	 * (programmed in bnx2_enable_msix()).
	 */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6558
/* ndo_tx_timeout: the stack detected a stuck TX queue.  Log diagnostic
 * state, then schedule a full device reset from process context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6571
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb (head + page frags) for DMA, builds one TX buffer
 * descriptor per segment, and rings the doorbell.  Returns
 * NETDEV_TX_OK even on mapping failure (the skb is dropped), and
 * NETDEV_TX_BUSY only in the "queue awake but ring full" case that
 * should never happen.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped below whenever fewer than
	 * MAX_SKB_FRAGS + 1 descriptors remain.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	/* TSO: encode MSS, TCP option length and (for IPv6) the transport
	 * header offset into the BD flags/mss fields.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* Offset of the TCP header beyond a plain
			 * Ethernet + fixed IPv6 header (extension headers).
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is in 8-byte units, split across
				 * several BD fields.
				 */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear part; on failure just drop the packet. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	/* Advance past the last BD and ring the doorbell registers. */
	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	/* Stop the queue when nearly full; recheck after the barrier in
	 * case bnx2_tx_int() freed descriptors concurrently.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
6749
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Teardown order matters: quiesce interrupts and NAPI first, stop
	 * TX and the maintenance timer, reset the chip, then free IRQs,
	 * buffers and memory.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6769
6770 static void
6771 bnx2_save_stats(struct bnx2 *bp)
6772 {
6773         u32 *hw_stats = (u32 *) bp->stats_blk;
6774         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6775         int i;
6776
6777         /* The 1st 10 counters are 64-bit counters */
6778         for (i = 0; i < 20; i += 2) {
6779                 u32 hi;
6780                 u64 lo;
6781
6782                 hi = temp_stats[i] + hw_stats[i];
6783                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6784                 if (lo > 0xffffffff)
6785                         hi++;
6786                 temp_stats[i] = hi;
6787                 temp_stats[i + 1] = lo & 0xffffffff;
6788         }
6789
6790         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6791                 temp_stats[i] += hw_stats[i];
6792 }
6793
/* Reassemble a 64-bit counter from its _hi/_lo u32 halves. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter over the live stats block and the saved copy
 * (temp_stats_blk holds values accumulated across chip resets by
 * bnx2_save_stats()).
 */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same, for a plain 32-bit counter. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
6804
/* Fill *net_stats from the chip's statistics block plus the saved
 * accumulator.  Returns net_stats unchanged if the stats block has not
 * been allocated yet (device never brought up).
 */
static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	/* "Overrsize" is the actual (misspelled) field name in bnx2.h. */
	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the sum of the categories computed above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report carrier sense errors reliably. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6877
6878 /* All ethtool functions called with rtnl_lock */
6879
6880 static int
6881 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6882 {
6883         struct bnx2 *bp = netdev_priv(dev);
6884         int support_serdes = 0, support_copper = 0;
6885
6886         cmd->supported = SUPPORTED_Autoneg;
6887         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6888                 support_serdes = 1;
6889                 support_copper = 1;
6890         } else if (bp->phy_port == PORT_FIBRE)
6891                 support_serdes = 1;
6892         else
6893                 support_copper = 1;
6894
6895         if (support_serdes) {
6896                 cmd->supported |= SUPPORTED_1000baseT_Full |
6897                         SUPPORTED_FIBRE;
6898                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6899                         cmd->supported |= SUPPORTED_2500baseX_Full;
6900
6901         }
6902         if (support_copper) {
6903                 cmd->supported |= SUPPORTED_10baseT_Half |
6904                         SUPPORTED_10baseT_Full |
6905                         SUPPORTED_100baseT_Half |
6906                         SUPPORTED_100baseT_Full |
6907                         SUPPORTED_1000baseT_Full |
6908                         SUPPORTED_TP;
6909
6910         }
6911
6912         spin_lock_bh(&bp->phy_lock);
6913         cmd->port = bp->phy_port;
6914         cmd->advertising = bp->advertising;
6915
6916         if (bp->autoneg & AUTONEG_SPEED) {
6917                 cmd->autoneg = AUTONEG_ENABLE;
6918         } else {
6919                 cmd->autoneg = AUTONEG_DISABLE;
6920         }
6921
6922         if (netif_carrier_ok(dev)) {
6923                 ethtool_cmd_speed_set(cmd, bp->line_speed);
6924                 cmd->duplex = bp->duplex;
6925                 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6926                         if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6927                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
6928                         else
6929                                 cmd->eth_tp_mdix = ETH_TP_MDI;
6930                 }
6931         }
6932         else {
6933                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
6934                 cmd->duplex = DUPLEX_UNKNOWN;
6935         }
6936         spin_unlock_bh(&bp->phy_lock);
6937
6938         cmd->transceiver = XCVR_INTERNAL;
6939         cmd->phy_address = bp->phy_addr;
6940
6941         return 0;
6942 }
6943
/* ethtool .set_settings: validate and apply new link parameters.
 * Returns -EINVAL on any invalid combination; applies the settings to
 * the PHY immediately only if the device is running.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed on a validation
	 * failure.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote-PHY device. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Mask advertised modes to what the chosen medium
		 * supports; fall back to everything if nothing is left.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = ethtool_cmd_speed(cmd);
		if (cmd->port == PORT_FIBRE) {
			/* Fibre only does 1G/2.5G full duplex. */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			/* Forced gigabit speeds are not valid on copper. */
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7021
/* ethtool .get_drvinfo: report driver name/version, bootcode/firmware
 * version string and PCI bus address (for "ethtool -i").
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
7032
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool .get_regs_len: fixed-size dump buffer. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7040
/* ethtool .get_regs: dump readable register ranges into *_p, leaving
 * gaps (unimplemented/side-effect registers) zero-filled.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of (start, end) byte offsets of readable register
	 * windows; the final 0x8000 sentinel terminates the walk.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the device is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): u32-pointer arithmetic here scales by 4 while
	 * offset is a byte offset; benign only because reg_boundaries[0]
	 * is 0 — would be wrong if the table ever started nonzero.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* At the end of a window, jump to the start of the next
		 * one (byte-addressed via orig_p).
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7092
7093 static void
7094 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7095 {
7096         struct bnx2 *bp = netdev_priv(dev);
7097
7098         if (bp->flags & BNX2_FLAG_NO_WOL) {
7099                 wol->supported = 0;
7100                 wol->wolopts = 0;
7101         }
7102         else {
7103                 wol->supported = WAKE_MAGIC;
7104                 if (bp->wol)
7105                         wol->wolopts = WAKE_MAGIC;
7106                 else
7107                         wol->wolopts = 0;
7108         }
7109         memset(&wol->sopass, 0, sizeof(wol->sopass));
7110 }
7111
7112 static int
7113 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7114 {
7115         struct bnx2 *bp = netdev_priv(dev);
7116
7117         if (wol->wolopts & ~WAKE_MAGIC)
7118                 return -EINVAL;
7119
7120         if (wol->wolopts & WAKE_MAGIC) {
7121                 if (bp->flags & BNX2_FLAG_NO_WOL)
7122                         return -EINVAL;
7123
7124                 bp->wol = 1;
7125         }
7126         else {
7127                 bp->wol = 0;
7128         }
7129
7130         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7131
7132         return 0;
7133 }
7134
/* ethtool .nway_reset: restart autonegotiation.  Only valid while the
 * device is running with autoneg enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: let the management firmware redo the setup. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot run in
		 * a BH-disabled section.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7180
/* ethtool .get_link: report the cached link-up state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7188
7189 static int
7190 bnx2_get_eeprom_len(struct net_device *dev)
7191 {
7192         struct bnx2 *bp = netdev_priv(dev);
7193
7194         if (bp->flash_info == NULL)
7195                 return 0;
7196
7197         return (int) bp->flash_size;
7198 }
7199
7200 static int
7201 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7202                 u8 *eebuf)
7203 {
7204         struct bnx2 *bp = netdev_priv(dev);
7205         int rc;
7206
7207         /* parameters already validated in ethtool_get_eeprom */
7208
7209         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7210
7211         return rc;
7212 }
7213
7214 static int
7215 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7216                 u8 *eebuf)
7217 {
7218         struct bnx2 *bp = netdev_priv(dev);
7219         int rc;
7220
7221         /* parameters already validated in ethtool_set_eeprom */
7222
7223         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7224
7225         return rc;
7226 }
7227
/* ethtool .get_coalesce: report the current interrupt coalescing
 * parameters (tick values in usecs, frame-count trip points).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero unsupported fields first. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7249
7250 static int
7251 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7252 {
7253         struct bnx2 *bp = netdev_priv(dev);
7254
7255         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7256         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7257
7258         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7259         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7260
7261         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7262         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7263
7264         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7265         if (bp->rx_quick_cons_trip_int > 0xff)
7266                 bp->rx_quick_cons_trip_int = 0xff;
7267
7268         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7269         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7270
7271         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7272         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7273
7274         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7275         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7276
7277         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7278         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7279                 0xff;
7280
7281         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7282         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7283                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7284                         bp->stats_ticks = USEC_PER_SEC;
7285         }
7286         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7287                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7288         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7289
7290         if (netif_running(bp->dev)) {
7291                 bnx2_netif_stop(bp, true);
7292                 bnx2_init_nic(bp, 0);
7293                 bnx2_netif_start(bp, true);
7294         }
7295
7296         return 0;
7297 }
7298
/* ethtool .get_ringparam: report maximum and current RX/TX ring sizes
 * (the "jumbo" ring is the RX page ring).
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7313
/* Resize the RX/TX rings.  If the device is running, it is torn down,
 * reconfigured and brought back up; on re-init failure the device is
 * closed and the error returned.  reset_irq additionally releases and
 * re-acquires IRQs/NAPI (needed when the vector count may change).
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; used by the (re)allocation below. */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		/* Could not come back up: close the device cleanly.
		 * NAPI must be enabled first so dev_close can disable it.
		 */
		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7369
7370 static int
7371 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7372 {
7373         struct bnx2 *bp = netdev_priv(dev);
7374         int rc;
7375
7376         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7377                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7378                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7379
7380                 return -EINVAL;
7381         }
7382         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7383                                    false);
7384         return rc;
7385 }
7386
7387 static void
7388 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7389 {
7390         struct bnx2 *bp = netdev_priv(dev);
7391
7392         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7393         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7394         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7395 }
7396
7397 static int
7398 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7399 {
7400         struct bnx2 *bp = netdev_priv(dev);
7401
7402         bp->req_flow_ctrl = 0;
7403         if (epause->rx_pause)
7404                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7405         if (epause->tx_pause)
7406                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7407
7408         if (epause->autoneg) {
7409                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7410         }
7411         else {
7412                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7413         }
7414
7415         if (netif_running(dev)) {
7416                 spin_lock_bh(&bp->phy_lock);
7417                 bnx2_setup_phy(bp, bp->phy_port);
7418                 spin_unlock_bh(&bp->phy_lock);
7419         }
7420
7421         return 0;
7422 }
7423
/* Statistic names for "ethtool -S".  Entry order must stay in lockstep
 * with bnx2_stats_offset_arr[] below (indexed by BNX2_NUM_STATS).
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7475
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Byte offset of a statistics_block member, expressed as a 32-bit word
 * index into the hardware stats block.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter inside the chip's statistics block; same
 * index order as bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7529
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8 or 4; 0 = skip) on early 5706/5708
 * steppings; indexed like bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7540
/* Counter widths for all other chip revisions; only stat_IfHCInBadOctets
 * (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7548
#define BNX2_NUM_TESTS 6

/* Self-test names for ETH_SS_TEST; order matches the buf[] result slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7561
7562 static int
7563 bnx2_get_sset_count(struct net_device *dev, int sset)
7564 {
7565         switch (sset) {
7566         case ETH_SS_TEST:
7567                 return BNX2_NUM_TESTS;
7568         case ETH_SS_STATS:
7569                 return BNX2_NUM_STATS;
7570         default:
7571                 return -EOPNOTSUPP;
7572         }
7573 }
7574
/* ethtool self-test.  Result slots: 0=registers 1=memory 2=loopback
 * 3=nvram 4=interrupt 5=link, matching bnx2_tests_str_arr; 0 = pass.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive chip access: stop traffic,
		 * put the chip in diagnostic mode and drop all buffers.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is reported directly in the slot. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the state we tore down above. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: safe with the interface still active. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7629
7630 static void
7631 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7632 {
7633         switch (stringset) {
7634         case ETH_SS_STATS:
7635                 memcpy(buf, bnx2_stats_str_arr,
7636                         sizeof(bnx2_stats_str_arr));
7637                 break;
7638         case ETH_SS_TEST:
7639                 memcpy(buf, bnx2_tests_str_arr,
7640                         sizeof(bnx2_tests_str_arr));
7641                 break;
7642         }
7643 }
7644
7645 static void
7646 bnx2_get_ethtool_stats(struct net_device *dev,
7647                 struct ethtool_stats *stats, u64 *buf)
7648 {
7649         struct bnx2 *bp = netdev_priv(dev);
7650         int i;
7651         u32 *hw_stats = (u32 *) bp->stats_blk;
7652         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7653         u8 *stats_len_arr = NULL;
7654
7655         if (hw_stats == NULL) {
7656                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7657                 return;
7658         }
7659
7660         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7661             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7662             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7663             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7664                 stats_len_arr = bnx2_5706_stats_len_arr;
7665         else
7666                 stats_len_arr = bnx2_5708_stats_len_arr;
7667
7668         for (i = 0; i < BNX2_NUM_STATS; i++) {
7669                 unsigned long offset;
7670
7671                 if (stats_len_arr[i] == 0) {
7672                         /* skip this counter */
7673                         buf[i] = 0;
7674                         continue;
7675                 }
7676
7677                 offset = bnx2_stats_offset_arr[i];
7678                 if (stats_len_arr[i] == 4) {
7679                         /* 4-byte counter */
7680                         buf[i] = (u64) *(hw_stats + offset) +
7681                                  *(temp_stats + offset);
7682                         continue;
7683                 }
7684                 /* 8-byte counter */
7685                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7686                          *(hw_stats + offset + 1) +
7687                          (((u64) *(temp_stats + offset)) << 32) +
7688                          *(temp_stats + offset + 1);
7689         }
7690 }
7691
/* ethtool port-identify (ethtool -p): blink the port LED by driving the
 * EMAC LED override bits, restoring the saved LED mode when finished.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current LED config and take manual control. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force every speed/traffic LED on at once. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Drop the override and restore the saved LED mode. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7724
/* ndo_set_features: adjust VLAN-dependent TSO features and reprogram the
 * RX mode when VLAN tag stripping is toggled while the NIC is running.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* RX VLAN stripping changed while up: pause traffic, commit the
	 * feature set, reprogram the RX mode and notify the firmware.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		/* NOTE(review): nonzero return appears to signal that
		 * dev->features was already committed here — confirm
		 * against the ndo_set_features contract.
		 */
		return 1;
	}

	return 0;
}
7749
7750 static void bnx2_get_channels(struct net_device *dev,
7751                               struct ethtool_channels *channels)
7752 {
7753         struct bnx2 *bp = netdev_priv(dev);
7754         u32 max_rx_rings = 1;
7755         u32 max_tx_rings = 1;
7756
7757         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7758                 max_rx_rings = RX_MAX_RINGS;
7759                 max_tx_rings = TX_MAX_RINGS;
7760         }
7761
7762         channels->max_rx = max_rx_rings;
7763         channels->max_tx = max_tx_rings;
7764         channels->max_other = 0;
7765         channels->max_combined = 0;
7766         channels->rx_count = bp->num_rx_rings;
7767         channels->tx_count = bp->num_tx_rings;
7768         channels->other_count = 0;
7769         channels->combined_count = 0;
7770 }
7771
7772 static int bnx2_set_channels(struct net_device *dev,
7773                               struct ethtool_channels *channels)
7774 {
7775         struct bnx2 *bp = netdev_priv(dev);
7776         u32 max_rx_rings = 1;
7777         u32 max_tx_rings = 1;
7778         int rc = 0;
7779
7780         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7781                 max_rx_rings = RX_MAX_RINGS;
7782                 max_tx_rings = TX_MAX_RINGS;
7783         }
7784         if (channels->rx_count > max_rx_rings ||
7785             channels->tx_count > max_tx_rings)
7786                 return -EINVAL;
7787
7788         bp->num_req_rx_rings = channels->rx_count;
7789         bp->num_req_tx_rings = channels->tx_count;
7790
7791         if (netif_running(dev))
7792                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7793                                            bp->tx_ring_size, true);
7794
7795         return rc;
7796 }
7797
/* ethtool callback table registered on the net_device. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
};
7825
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  Direct
 * PHY access is refused when a remote PHY manages the link, and requires
 * the interface to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with link handling. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		/* NOTE(review): val_out is assigned even when the read
		 * failed; presumably bnx2_read_phy always writes
		 * mii_regval — confirm, otherwise this may expose an
		 * uninitialized stack value on error.
		 */
		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7876
7877 /* Called with rtnl_lock */
7878 static int
7879 bnx2_change_mac_addr(struct net_device *dev, void *p)
7880 {
7881         struct sockaddr *addr = p;
7882         struct bnx2 *bp = netdev_priv(dev);
7883
7884         if (!is_valid_ether_addr(addr->sa_data))
7885                 return -EADDRNOTAVAIL;
7886
7887         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7888         if (netif_running(dev))
7889                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7890
7891         return 0;
7892 }
7893
7894 /* Called with rtnl_lock */
7895 static int
7896 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7897 {
7898         struct bnx2 *bp = netdev_priv(dev);
7899
7900         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7901                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7902                 return -EINVAL;
7903
7904         dev->mtu = new_mtu;
7905         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7906                                      false);
7907 }
7908
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every IRQ vector, masking each interrupt around
 * its handler so it can run safely from polling context.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7925
/* Determine the 5709's media type (copper vs SerDes) from the bond id
 * and strap bits in MISC_DUAL_MEDIA_CTRL, setting BNX2_PHY_FLAG_SERDES
 * when the port is SerDes.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* The _C and _S bond ids settle the question on their own. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Otherwise read the strap, preferring a software override. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* Which strap values mean SerDes differs by PCI function. */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7963
/* Probe the chip's PCICFG status registers to record bus type (PCI vs
 * PCI-X), width and clock speed in bp->flags and bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: decode the detected clock-speed field. */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: only 66 vs 33 MHz, from the M66EN status. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8015
/* Read the VPD area from NVRAM and, when the read-only section carries
 * the MFR id "1028" plus a V0 vendor keyword, copy that vendor version
 * string into the front of bp->fw_version (space-terminated).
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* Front half of the buffer receives the byte-swapped copy; the
	 * back half holds the raw NVRAM words.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM stores the VPD with each 32-bit word's bytes reversed;
	 * undo that swap into the front half of the buffer.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD resource and bounds-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only act when the MFR id field is exactly "1028" (presumably
	 * a specific OEM's PCI vendor id — confirm).
	 */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy the vendor version to the START of fw_version (this is
	 * not an append) and terminate it with a space.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8083
8084 static int
8085 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8086 {
8087         struct bnx2 *bp;
8088         int rc, i, j;
8089         u32 reg;
8090         u64 dma_mask, persist_dma_mask;
8091         int err;
8092
8093         SET_NETDEV_DEV(dev, &pdev->dev);
8094         bp = netdev_priv(dev);
8095
8096         bp->flags = 0;
8097         bp->phy_flags = 0;
8098
8099         bp->temp_stats_blk =
8100                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8101
8102         if (bp->temp_stats_blk == NULL) {
8103                 rc = -ENOMEM;
8104                 goto err_out;
8105         }
8106
8107         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8108         rc = pci_enable_device(pdev);
8109         if (rc) {
8110                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8111                 goto err_out;
8112         }
8113
8114         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8115                 dev_err(&pdev->dev,
8116                         "Cannot find PCI device base address, aborting\n");
8117                 rc = -ENODEV;
8118                 goto err_out_disable;
8119         }
8120
8121         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8122         if (rc) {
8123                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8124                 goto err_out_disable;
8125         }
8126
8127         pci_set_master(pdev);
8128
8129         bp->pm_cap = pdev->pm_cap;
8130         if (bp->pm_cap == 0) {
8131                 dev_err(&pdev->dev,
8132                         "Cannot find power management capability, aborting\n");
8133                 rc = -EIO;
8134                 goto err_out_release;
8135         }
8136
8137         bp->dev = dev;
8138         bp->pdev = pdev;
8139
8140         spin_lock_init(&bp->phy_lock);
8141         spin_lock_init(&bp->indirect_lock);
8142 #ifdef BCM_CNIC
8143         mutex_init(&bp->cnic_lock);
8144 #endif
8145         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8146
8147         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8148                                                          TX_MAX_TSS_RINGS + 1));
8149         if (!bp->regview) {
8150                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8151                 rc = -ENOMEM;
8152                 goto err_out_release;
8153         }
8154
8155         /* Configure byte swap and enable write to the reg_window registers.
8156          * Rely on CPU to do target byte swapping on big endian systems
8157          * The chip's target access swapping will not swap all accesses
8158          */
8159         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8160                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8161                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8162
8163         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8164
8165         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8166                 if (!pci_is_pcie(pdev)) {
8167                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8168                         rc = -EIO;
8169                         goto err_out_unmap;
8170                 }
8171                 bp->flags |= BNX2_FLAG_PCIE;
8172                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8173                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8174
8175                 /* AER (Advanced Error Reporting) hooks */
8176                 err = pci_enable_pcie_error_reporting(pdev);
8177                 if (!err)
8178                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8179
8180         } else {
8181                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8182                 if (bp->pcix_cap == 0) {
8183                         dev_err(&pdev->dev,
8184                                 "Cannot find PCIX capability, aborting\n");
8185                         rc = -EIO;
8186                         goto err_out_unmap;
8187                 }
8188                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8189         }
8190
8191         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8192             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8193                 if (pdev->msix_cap)
8194                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8195         }
8196
8197         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8198             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8199                 if (pdev->msi_cap)
8200                         bp->flags |= BNX2_FLAG_MSI_CAP;
8201         }
8202
8203         /* 5708 cannot support DMA addresses > 40-bit.  */
8204         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8205                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8206         else
8207                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8208
8209         /* Configure DMA attributes. */
8210         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8211                 dev->features |= NETIF_F_HIGHDMA;
8212                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8213                 if (rc) {
8214                         dev_err(&pdev->dev,
8215                                 "pci_set_consistent_dma_mask failed, aborting\n");
8216                         goto err_out_unmap;
8217                 }
8218         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8219                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8220                 goto err_out_unmap;
8221         }
8222
8223         if (!(bp->flags & BNX2_FLAG_PCIE))
8224                 bnx2_get_pci_speed(bp);
8225
8226         /* 5706A0 may falsely detect SERR and PERR. */
8227         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8228                 reg = BNX2_RD(bp, PCI_COMMAND);
8229                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8230                 BNX2_WR(bp, PCI_COMMAND, reg);
8231         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8232                 !(bp->flags & BNX2_FLAG_PCIX)) {
8233                 dev_err(&pdev->dev,
8234                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8235                 rc = -EPERM;
8236                 goto err_out_unmap;
8237         }
8238
8239         bnx2_init_nvram(bp);
8240
8241         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8242
8243         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8244                 bp->func = 1;
8245
8246         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8247             BNX2_SHM_HDR_SIGNATURE_SIG) {
8248                 u32 off = bp->func << 2;
8249
8250                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8251         } else
8252                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8253
8254         /* Get the permanent MAC address.  First we need to make sure the
8255          * firmware is actually running.
8256          */
8257         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8258
8259         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8260             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8261                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8262                 rc = -ENODEV;
8263                 goto err_out_unmap;
8264         }
8265
8266         bnx2_read_vpd_fw_ver(bp);
8267
8268         j = strlen(bp->fw_version);
8269         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8270         for (i = 0; i < 3 && j < 24; i++) {
8271                 u8 num, k, skip0;
8272
8273                 if (i == 0) {
8274                         bp->fw_version[j++] = 'b';
8275                         bp->fw_version[j++] = 'c';
8276                         bp->fw_version[j++] = ' ';
8277                 }
8278                 num = (u8) (reg >> (24 - (i * 8)));
8279                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8280                         if (num >= k || !skip0 || k == 1) {
8281                                 bp->fw_version[j++] = (num / k) + '0';
8282                                 skip0 = 0;
8283                         }
8284                 }
8285                 if (i != 2)
8286                         bp->fw_version[j++] = '.';
8287         }
8288         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8289         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8290                 bp->wol = 1;
8291
8292         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8293                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8294
8295                 for (i = 0; i < 30; i++) {
8296                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8297                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8298                                 break;
8299                         msleep(10);
8300                 }
8301         }
8302         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8303         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8304         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8305             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8306                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8307
8308                 if (j < 32)
8309                         bp->fw_version[j++] = ' ';
8310                 for (i = 0; i < 3 && j < 28; i++) {
8311                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8312                         reg = be32_to_cpu(reg);
8313                         memcpy(&bp->fw_version[j], &reg, 4);
8314                         j += 4;
8315                 }
8316         }
8317
8318         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8319         bp->mac_addr[0] = (u8) (reg >> 8);
8320         bp->mac_addr[1] = (u8) reg;
8321
8322         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8323         bp->mac_addr[2] = (u8) (reg >> 24);
8324         bp->mac_addr[3] = (u8) (reg >> 16);
8325         bp->mac_addr[4] = (u8) (reg >> 8);
8326         bp->mac_addr[5] = (u8) reg;
8327
8328         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8329         bnx2_set_rx_ring_size(bp, 255);
8330
8331         bp->tx_quick_cons_trip_int = 2;
8332         bp->tx_quick_cons_trip = 20;
8333         bp->tx_ticks_int = 18;
8334         bp->tx_ticks = 80;
8335
8336         bp->rx_quick_cons_trip_int = 2;
8337         bp->rx_quick_cons_trip = 12;
8338         bp->rx_ticks_int = 18;
8339         bp->rx_ticks = 18;
8340
8341         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8342
8343         bp->current_interval = BNX2_TIMER_INTERVAL;
8344
8345         bp->phy_addr = 1;
8346
8347         /* allocate stats_blk */
8348         rc = bnx2_alloc_stats_blk(dev);
8349         if (rc)
8350                 goto err_out_unmap;
8351
8352         /* Disable WOL support if we are running on a SERDES chip. */
8353         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8354                 bnx2_get_5709_media(bp);
8355         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8356                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8357
8358         bp->phy_port = PORT_TP;
8359         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8360                 bp->phy_port = PORT_FIBRE;
8361                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8362                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8363                         bp->flags |= BNX2_FLAG_NO_WOL;
8364                         bp->wol = 0;
8365                 }
8366                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8367                         /* Don't do parallel detect on this board because of
8368                          * some board problems.  The link will not go down
8369                          * if we do parallel detect.
8370                          */
8371                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8372                             pdev->subsystem_device == 0x310c)
8373                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8374                 } else {
8375                         bp->phy_addr = 2;
8376                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8377                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8378                 }
8379         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8380                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8381                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8382         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8383                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8384                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8385                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8386
8387         bnx2_init_fw_cap(bp);
8388
8389         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8390             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8391             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8392             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8393                 bp->flags |= BNX2_FLAG_NO_WOL;
8394                 bp->wol = 0;
8395         }
8396
8397         if (bp->flags & BNX2_FLAG_NO_WOL)
8398                 device_set_wakeup_capable(&bp->pdev->dev, false);
8399         else
8400                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8401
8402         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8403                 bp->tx_quick_cons_trip_int =
8404                         bp->tx_quick_cons_trip;
8405                 bp->tx_ticks_int = bp->tx_ticks;
8406                 bp->rx_quick_cons_trip_int =
8407                         bp->rx_quick_cons_trip;
8408                 bp->rx_ticks_int = bp->rx_ticks;
8409                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8410                 bp->com_ticks_int = bp->com_ticks;
8411                 bp->cmd_ticks_int = bp->cmd_ticks;
8412         }
8413
8414         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8415          *
8416          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8417          * with byte enables disabled on the unused 32-bit word.  This is legal
8418          * but causes problems on the AMD 8132 which will eventually stop
8419          * responding after a while.
8420          *
8421          * AMD believes this incompatibility is unique to the 5706, and
8422          * prefers to locally disable MSI rather than globally disabling it.
8423          */
8424         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8425                 struct pci_dev *amd_8132 = NULL;
8426
8427                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8428                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8429                                                   amd_8132))) {
8430
8431                         if (amd_8132->revision >= 0x10 &&
8432                             amd_8132->revision <= 0x13) {
8433                                 disable_msi = 1;
8434                                 pci_dev_put(amd_8132);
8435                                 break;
8436                         }
8437                 }
8438         }
8439
8440         bnx2_set_default_link(bp);
8441         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8442
8443         init_timer(&bp->timer);
8444         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8445         bp->timer.data = (unsigned long) bp;
8446         bp->timer.function = bnx2_timer;
8447
8448 #ifdef BCM_CNIC
8449         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8450                 bp->cnic_eth_dev.max_iscsi_conn =
8451                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8452                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8453         bp->cnic_probe = bnx2_cnic_probe;
8454 #endif
8455         pci_save_state(pdev);
8456
8457         return 0;
8458
8459 err_out_unmap:
8460         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8461                 pci_disable_pcie_error_reporting(pdev);
8462                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8463         }
8464
8465         pci_iounmap(pdev, bp->regview);
8466         bp->regview = NULL;
8467
8468 err_out_release:
8469         pci_release_regions(pdev);
8470
8471 err_out_disable:
8472         pci_disable_device(pdev);
8473
8474 err_out:
8475         kfree(bp->temp_stats_blk);
8476
8477         return rc;
8478 }
8479
8480 static char *
8481 bnx2_bus_string(struct bnx2 *bp, char *str)
8482 {
8483         char *s = str;
8484
8485         if (bp->flags & BNX2_FLAG_PCIE) {
8486                 s += sprintf(s, "PCI Express");
8487         } else {
8488                 s += sprintf(s, "PCI");
8489                 if (bp->flags & BNX2_FLAG_PCIX)
8490                         s += sprintf(s, "-X");
8491                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8492                         s += sprintf(s, " 32-bit");
8493                 else
8494                         s += sprintf(s, " 64-bit");
8495                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8496         }
8497         return str;
8498 }
8499
8500 static void
8501 bnx2_del_napi(struct bnx2 *bp)
8502 {
8503         int i;
8504
8505         for (i = 0; i < bp->irq_nvecs; i++)
8506                 netif_napi_del(&bp->bnx2_napi[i].napi);
8507 }
8508
8509 static void
8510 bnx2_init_napi(struct bnx2 *bp)
8511 {
8512         int i;
8513
8514         for (i = 0; i < bp->irq_nvecs; i++) {
8515                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8516                 int (*poll)(struct napi_struct *, int);
8517
8518                 if (i == 0)
8519                         poll = bnx2_poll;
8520                 else
8521                         poll = bnx2_poll_msix;
8522
8523                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8524                 bnapi->bp = bp;
8525         }
8526 }
8527
/* net_device_ops: entry points the network stack uses to drive the NIC. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8544
8545 static int
8546 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8547 {
8548         static int version_printed = 0;
8549         struct net_device *dev;
8550         struct bnx2 *bp;
8551         int rc;
8552         char str[40];
8553
8554         if (version_printed++ == 0)
8555                 pr_info("%s", version);
8556
8557         /* dev zeroed in init_etherdev */
8558         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8559         if (!dev)
8560                 return -ENOMEM;
8561
8562         rc = bnx2_init_board(pdev, dev);
8563         if (rc < 0)
8564                 goto err_free;
8565
8566         dev->netdev_ops = &bnx2_netdev_ops;
8567         dev->watchdog_timeo = TX_TIMEOUT;
8568         dev->ethtool_ops = &bnx2_ethtool_ops;
8569
8570         bp = netdev_priv(dev);
8571
8572         pci_set_drvdata(pdev, dev);
8573
8574         memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8575
8576         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8577                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8578                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8579
8580         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8581                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8582
8583         dev->vlan_features = dev->hw_features;
8584         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8585         dev->features |= dev->hw_features;
8586         dev->priv_flags |= IFF_UNICAST_FLT;
8587
8588         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8589                 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8590
8591         if ((rc = register_netdev(dev))) {
8592                 dev_err(&pdev->dev, "Cannot register net device\n");
8593                 goto error;
8594         }
8595
8596         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8597                     "node addr %pM\n", board_info[ent->driver_data].name,
8598                     ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8599                     ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8600                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8601                     pdev->irq, dev->dev_addr);
8602
8603         return 0;
8604
8605 error:
8606         pci_iounmap(pdev, bp->regview);
8607         pci_release_regions(pdev);
8608         pci_disable_device(pdev);
8609 err_free:
8610         bnx2_free_stats_blk(dev);
8611         free_netdev(dev);
8612         return rc;
8613 }
8614
/* PCI remove hook: unwind everything bnx2_init_one()/bnx2_init_board()
 * set up, in reverse order of acquisition.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Once the netdev is gone, no new timer or reset work can be
	 * scheduled; flush whatever is still in flight.
	 */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	/* AER reporting was enabled by bnx2_init_board() on PCIe parts. */
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8643
8644 #ifdef CONFIG_PM_SLEEP
/* PM suspend hook: quiesce the running interface (stop traffic, timer,
 * chip, IRQs, buffers) and then program wake-on-LAN state.  The exact
 * teardown order below matters; see bnx2_resume() for the inverse.
 */
static int
bnx2_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	/* WoL is configured even when the interface is down. */
	bnx2_setup_wol(bp);
	return 0;
}
8664
/* PM resume hook: restore full power and re-initialize the NIC, but
 * only if the interface was running when bnx2_suspend() quiesced it.
 */
static int
bnx2_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8682
/* Bundle suspend/resume into dev_pm_ops for the pci_driver below. */
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

/* No PM support configured: register no dev_pm_ops. */
#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
8691 /**
8692  * bnx2_io_error_detected - called when PCI error is detected
8693  * @pdev: Pointer to PCI device
8694  * @state: The current pci connection state
8695  *
8696  * This function is called after a PCI bus error affecting
8697  * this device has been detected.
8698  */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* The link to the device is permanently gone; no recovery possible. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8725
8726 /**
8727  * bnx2_io_slot_reset - called after the pci bus has been reset.
8728  * @pdev: Pointer to PCI device
8729  *
8730  * Restart the card from scratch, as if from a cold-boot.
8731  */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	int err = 0;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
	} else {
		pci_set_master(pdev);
		/* Config space saved at probe (and re-saved here) so a
		 * later reset can restore it again.
		 */
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev))
			err = bnx2_init_nic(bp, 1);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	/* Recovery failed: re-enable NAPI so dev_close() can quiesce the
	 * device cleanly, then take the interface down.
	 */
	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
		bnx2_napi_enable(bp);
		dev_close(dev);
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err); /* non-fatal, continue */
	}

	return result;
}
8773
8774 /**
8775  * bnx2_io_resume - called when traffic can start flowing again.
8776  * @pdev: Pointer to PCI device
8777  *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
8780  */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart the data path only if the interface was up when the
	 * error was detected; always re-attach the device.
	 */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8793
8794 static void bnx2_shutdown(struct pci_dev *pdev)
8795 {
8796         struct net_device *dev = pci_get_drvdata(pdev);
8797         struct bnx2 *bp;
8798
8799         if (!dev)
8800                 return;
8801
8802         bp = netdev_priv(dev);
8803         if (!bp)
8804                 return;
8805
8806         rtnl_lock();
8807         if (netif_running(dev))
8808                 dev_close(bp->dev);
8809
8810         if (system_state == SYSTEM_POWER_OFF)
8811                 bnx2_set_power_state(bp, PCI_D3hot);
8812
8813         rtnl_unlock();
8814 }
8815
/* AER recovery callbacks: error detect -> slot reset -> resume. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8821
/* Top-level PCI driver description tying probe/remove, power management,
 * error handling, and shutdown together.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};
8831
8832 module_pci_driver(bnx2_pci_driver);