GNU Linux-libre 4.14.302-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60
61 #define DRV_MODULE_NAME         "bnx2"
62 #define DRV_MODULE_VERSION      "2.2.6"
63 #define DRV_MODULE_RELDATE      "January 29, 2014"
64 #define FW_MIPS_FILE_06         "/*(DEBLOBBED)*/"
65 #define FW_RV2P_FILE_06         "/*(DEBLOBBED)*/"
66 #define FW_MIPS_FILE_09         "/*(DEBLOBBED)*/"
67 #define FW_RV2P_FILE_09_Ax      "/*(DEBLOBBED)*/"
68 #define FW_RV2P_FILE_09         "/*(DEBLOBBED)*/"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
75 static char version[] =
76         "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 /*(DEBLOBBED)*/
83
84 static int disable_msi = 0;
85
86 module_param(disable_msi, int, S_IRUGO);
87 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
88
/* Board index; used as the driver_data value in bnx2_pci_tbl[] and as
 * the index into board_info[] for the human-readable adapter name.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
102
/* indexed by board_t, above */
static struct {
	char *name;	/* marketing name reported for each board_t */
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
119
/* PCI IDs claimed by this driver.  driver_data holds a board_t index.
 * The HP OEM entries (specific subsystem vendor/device) are listed
 * before the PCI_ANY_ID catch-alls so they take precedence during
 * table matching.
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S have no PCI_DEVICE_ID_NX2_* symbol; raw IDs used. */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
145
/* Supported NVRAM parts for pre-5709 chips.
 *
 * The five leading hex words of each entry are raw controller
 * configuration/command values (field layout is defined by
 * struct flash_spec in bnx2.h -- not visible here, so their
 * individual meanings are not restated).  The remaining fields
 * are flags, page geometry, address mask, total size and name,
 * mirroring the named initializers used for flash_5709 below.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
234
/* The 5709 family uses a single fixed NVRAM description instead of the
 * strap-matched flash_table[] above.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
243
244 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
245
246 static void bnx2_init_napi(struct bnx2 *bp);
247 static void bnx2_del_napi(struct bnx2 *bp);
248
249 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
250 {
251         u32 diff;
252
253         /* The ring uses 256 indices for 255 entries, one of them
254          * needs to be skipped.
255          */
256         diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
257         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
258                 diff &= 0xffff;
259                 if (diff == BNX2_TX_DESC_CNT)
260                         diff = BNX2_MAX_TX_DESC_CNT;
261         }
262         return bp->tx_ring_size - diff;
263 }
264
265 static u32
266 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
267 {
268         unsigned long flags;
269         u32 val;
270
271         spin_lock_irqsave(&bp->indirect_lock, flags);
272         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
273         val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
274         spin_unlock_irqrestore(&bp->indirect_lock, flags);
275         return val;
276 }
277
278 static void
279 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
280 {
281         unsigned long flags;
282
283         spin_lock_irqsave(&bp->indirect_lock, flags);
284         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
285         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
286         spin_unlock_irqrestore(&bp->indirect_lock, flags);
287 }
288
289 static void
290 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
291 {
292         bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 }
294
295 static u32
296 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
297 {
298         return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
299 }
300
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * The 5709 has a request/completion style context interface that is
 * polled (bounded) for the WRITE_REQ bit to clear; older chips use a
 * simple address/data register pair.  indirect_lock serializes this
 * against other indirect register accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait up to 5 polls for the hardware to accept the write;
		 * on timeout the write is silently dropped.
		 */
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the single CNIC irq slot from the bnx2 interrupt setup.
 *
 * With MSI-X, CNIC gets the vector and status block after the ones
 * used by the ethernet rings (index bp->irq_nvecs); otherwise it
 * shares vector 0 and uses cnic_tag/cnic_present for status polling.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out contiguously at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
378                               void *data)
379 {
380         struct bnx2 *bp = netdev_priv(dev);
381         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
382
383         if (ops == NULL)
384                 return -EINVAL;
385
386         if (cp->drv_state & CNIC_DRV_STATE_REGD)
387                 return -EBUSY;
388
389         if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
390                 return -ENODEV;
391
392         bp->cnic_data = data;
393         rcu_assign_pointer(bp->cnic_ops, ops);
394
395         cp->num_irq = 0;
396         cp->drv_state = CNIC_DRV_STATE_REGD;
397
398         bnx2_setup_cnic_irq_info(bp);
399
400         return 0;
401 }
402
/* Unregister the CNIC driver.
 *
 * State is cleared under cnic_lock; synchronize_rcu() then waits for
 * any in-flight RCU readers of cnic_ops to drain before returning, so
 * the caller may safely release the ops table afterwards.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
417
/* Export this device's CNIC description to the CNIC driver.
 *
 * Returns NULL when the firmware reported no iSCSI connections
 * (max_iscsi_conn == 0), i.e. CNIC is not supported on this device.
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
436
437 static void
438 bnx2_cnic_stop(struct bnx2 *bp)
439 {
440         struct cnic_ops *c_ops;
441         struct cnic_ctl_info info;
442
443         mutex_lock(&bp->cnic_lock);
444         c_ops = rcu_dereference_protected(bp->cnic_ops,
445                                           lockdep_is_held(&bp->cnic_lock));
446         if (c_ops) {
447                 info.cmd = CNIC_CTL_STOP_CMD;
448                 c_ops->cnic_ctl(bp->cnic_data, &info);
449         }
450         mutex_unlock(&bp->cnic_lock);
451 }
452
453 static void
454 bnx2_cnic_start(struct bnx2 *bp)
455 {
456         struct cnic_ops *c_ops;
457         struct cnic_ctl_info info;
458
459         mutex_lock(&bp->cnic_lock);
460         c_ops = rcu_dereference_protected(bp->cnic_ops,
461                                           lockdep_is_held(&bp->cnic_lock));
462         if (c_ops) {
463                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
464                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
465
466                         bnapi->cnic_tag = bnapi->last_status_idx;
467                 }
468                 info.cmd = CNIC_CTL_START_CMD;
469                 c_ops->cnic_ctl(bp->cnic_data, &info);
470         }
471         mutex_unlock(&bp->cnic_lock);
472 }
473
#else

/* CNIC support compiled out: provide no-op start/stop hooks so the
 * rest of the driver can call them unconditionally.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
487
/* Read PHY register @reg through the EMAC MDIO interface.
 *
 * If the chip is auto-polling the PHY, polling is paused around the
 * manual transaction and re-enabled afterwards.  Returns 0 with the
 * register value in *val, or -EBUSY (and *val == 0) if the MDIO
 * transaction did not complete within the polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Temporarily disable hardware auto-polling of the PHY. */
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Launch the MDIO read transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the data field. */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Re-enable hardware auto-polling. */
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
544
/* Write @val to PHY register @reg through the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling of the PHY is paused
 * around the manual transaction.  Returns 0 on success or -EBUSY if
 * the transaction did not complete within the polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Temporarily disable hardware auto-polling of the PHY. */
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush the write */

		udelay(40);
	}

	/* Launch the MDIO write transaction. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 x 10us for the BUSY bit to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Re-enable hardware auto-polling. */
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
593
594 static void
595 bnx2_disable_int(struct bnx2 *bp)
596 {
597         int i;
598         struct bnx2_napi *bnapi;
599
600         for (i = 0; i < bp->irq_nvecs; i++) {
601                 bnapi = &bp->bnx2_napi[i];
602                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
603                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
604         }
605         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606 }
607
/* Unmask interrupts on every vector.
 *
 * Each vector gets two INT_ACK writes carrying last_status_idx: the
 * first with the mask bit still set, the second without, which leaves
 * the vector unmasked.  The final COAL_NOW write kicks the host
 * coalescing block so any pending status update fires immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
628
629 static void
630 bnx2_disable_int_sync(struct bnx2 *bp)
631 {
632         int i;
633
634         atomic_inc(&bp->intr_sem);
635         if (!netif_running(bp->dev))
636                 return;
637
638         bnx2_disable_int(bp);
639         for (i = 0; i < bp->irq_nvecs; i++)
640                 synchronize_irq(bp->irq_tbl[i].vector);
641 }
642
643 static void
644 bnx2_napi_disable(struct bnx2 *bp)
645 {
646         int i;
647
648         for (i = 0; i < bp->irq_nvecs; i++)
649                 napi_disable(&bp->bnx2_napi[i].napi);
650 }
651
652 static void
653 bnx2_napi_enable(struct bnx2 *bp)
654 {
655         int i;
656
657         for (i = 0; i < bp->irq_nvecs; i++)
658                 napi_enable(&bp->bnx2_napi[i].napi);
659 }
660
/* Quiesce the interface: optionally stop CNIC first, then disable
 * NAPI and the TX queues, mask and synchronize interrupts, and drop
 * the carrier so the stack does not declare a TX timeout while we
 * are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
673
/* Re-enable the interface after bnx2_netif_stop().
 *
 * intr_sem was incremented by bnx2_disable_int_sync(); only the call
 * that brings it back to zero restarts TX, NAPI, interrupts and
 * (optionally) CNIC, so nested stop/start pairs balance correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			/* phy_lock guards link_up vs. concurrent link changes. */
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
691
692 static void
693 bnx2_free_tx_mem(struct bnx2 *bp)
694 {
695         int i;
696
697         for (i = 0; i < bp->num_tx_rings; i++) {
698                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
700
701                 if (txr->tx_desc_ring) {
702                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
703                                           txr->tx_desc_ring,
704                                           txr->tx_desc_mapping);
705                         txr->tx_desc_ring = NULL;
706                 }
707                 kfree(txr->tx_buf_ring);
708                 txr->tx_buf_ring = NULL;
709         }
710 }
711
712 static void
713 bnx2_free_rx_mem(struct bnx2 *bp)
714 {
715         int i;
716
717         for (i = 0; i < bp->num_rx_rings; i++) {
718                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
720                 int j;
721
722                 for (j = 0; j < bp->rx_max_ring; j++) {
723                         if (rxr->rx_desc_ring[j])
724                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
725                                                   rxr->rx_desc_ring[j],
726                                                   rxr->rx_desc_mapping[j]);
727                         rxr->rx_desc_ring[j] = NULL;
728                 }
729                 vfree(rxr->rx_buf_ring);
730                 rxr->rx_buf_ring = NULL;
731
732                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
733                         if (rxr->rx_pg_desc_ring[j])
734                                 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
735                                                   rxr->rx_pg_desc_ring[j],
736                                                   rxr->rx_pg_desc_mapping[j]);
737                         rxr->rx_pg_desc_ring[j] = NULL;
738                 }
739                 vfree(rxr->rx_pg_ring);
740                 rxr->rx_pg_ring = NULL;
741         }
742 }
743
744 static int
745 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 {
747         int i;
748
749         for (i = 0; i < bp->num_tx_rings; i++) {
750                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
751                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
752
753                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
754                 if (txr->tx_buf_ring == NULL)
755                         return -ENOMEM;
756
757                 txr->tx_desc_ring =
758                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
759                                            &txr->tx_desc_mapping, GFP_KERNEL);
760                 if (txr->tx_desc_ring == NULL)
761                         return -ENOMEM;
762         }
763         return 0;
764 }
765
766 static int
767 bnx2_alloc_rx_mem(struct bnx2 *bp)
768 {
769         int i;
770
771         for (i = 0; i < bp->num_rx_rings; i++) {
772                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
773                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
774                 int j;
775
776                 rxr->rx_buf_ring =
777                         vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
778                 if (rxr->rx_buf_ring == NULL)
779                         return -ENOMEM;
780
781                 for (j = 0; j < bp->rx_max_ring; j++) {
782                         rxr->rx_desc_ring[j] =
783                                 dma_alloc_coherent(&bp->pdev->dev,
784                                                    RXBD_RING_SIZE,
785                                                    &rxr->rx_desc_mapping[j],
786                                                    GFP_KERNEL);
787                         if (rxr->rx_desc_ring[j] == NULL)
788                                 return -ENOMEM;
789
790                 }
791
792                 if (bp->rx_pg_ring_size) {
793                         rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
794                                                   bp->rx_max_pg_ring);
795                         if (rxr->rx_pg_ring == NULL)
796                                 return -ENOMEM;
797
798                 }
799
800                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
801                         rxr->rx_pg_desc_ring[j] =
802                                 dma_alloc_coherent(&bp->pdev->dev,
803                                                    RXBD_RING_SIZE,
804                                                    &rxr->rx_pg_desc_mapping[j],
805                                                    GFP_KERNEL);
806                         if (rxr->rx_pg_desc_ring[j] == NULL)
807                                 return -ENOMEM;
808
809                 }
810         }
811         return 0;
812 }
813
814 static void
815 bnx2_free_stats_blk(struct net_device *dev)
816 {
817         struct bnx2 *bp = netdev_priv(dev);
818
819         if (bp->status_blk) {
820                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
821                                   bp->status_blk,
822                                   bp->status_blk_mapping);
823                 bp->status_blk = NULL;
824                 bp->stats_blk = NULL;
825         }
826 }
827
828 static int
829 bnx2_alloc_stats_blk(struct net_device *dev)
830 {
831         int status_blk_size;
832         void *status_blk;
833         struct bnx2 *bp = netdev_priv(dev);
834
835         /* Combine status and statistics blocks into one allocation. */
836         status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
837         if (bp->flags & BNX2_FLAG_MSIX_CAP)
838                 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
839                                                  BNX2_SBLK_MSIX_ALIGN_SIZE);
840         bp->status_stats_size = status_blk_size +
841                                 sizeof(struct statistics_block);
842         status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
843                                          &bp->status_blk_mapping, GFP_KERNEL);
844         if (status_blk == NULL)
845                 return -ENOMEM;
846
847         bp->status_blk = status_blk;
848         bp->stats_blk = status_blk + status_blk_size;
849         bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
850
851         return 0;
852 }
853
854 static void
855 bnx2_free_mem(struct bnx2 *bp)
856 {
857         int i;
858         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
859
860         bnx2_free_tx_mem(bp);
861         bnx2_free_rx_mem(bp);
862
863         for (i = 0; i < bp->ctx_pages; i++) {
864                 if (bp->ctx_blk[i]) {
865                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
866                                           bp->ctx_blk[i],
867                                           bp->ctx_blk_mapping[i]);
868                         bp->ctx_blk[i] = NULL;
869                 }
870         }
871
872         if (bnapi->status_blk.msi)
873                 bnapi->status_blk.msi = NULL;
874 }
875
876 static int
877 bnx2_alloc_mem(struct bnx2 *bp)
878 {
879         int i, err;
880         struct bnx2_napi *bnapi;
881
882         bnapi = &bp->bnx2_napi[0];
883         bnapi->status_blk.msi = bp->status_blk;
884         bnapi->hw_tx_cons_ptr =
885                 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
886         bnapi->hw_rx_cons_ptr =
887                 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
888         if (bp->flags & BNX2_FLAG_MSIX_CAP) {
889                 for (i = 1; i < bp->irq_nvecs; i++) {
890                         struct status_block_msix *sblk;
891
892                         bnapi = &bp->bnx2_napi[i];
893
894                         sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
895                         bnapi->status_blk.msix = sblk;
896                         bnapi->hw_tx_cons_ptr =
897                                 &sblk->status_tx_quick_consumer_index;
898                         bnapi->hw_rx_cons_ptr =
899                                 &sblk->status_rx_quick_consumer_index;
900                         bnapi->int_num = i << 24;
901                 }
902         }
903
904         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
905                 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
906                 if (bp->ctx_pages == 0)
907                         bp->ctx_pages = 1;
908                 for (i = 0; i < bp->ctx_pages; i++) {
909                         bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
910                                                 BNX2_PAGE_SIZE,
911                                                 &bp->ctx_blk_mapping[i],
912                                                 GFP_KERNEL);
913                         if (bp->ctx_blk[i] == NULL)
914                                 goto alloc_mem_err;
915                 }
916         }
917
918         err = bnx2_alloc_rx_mem(bp);
919         if (err)
920                 goto alloc_mem_err;
921
922         err = bnx2_alloc_tx_mem(bp);
923         if (err)
924                 goto alloc_mem_err;
925
926         return 0;
927
928 alloc_mem_err:
929         bnx2_free_mem(bp);
930         return -ENOMEM;
931 }
932
933 static void
934 bnx2_report_fw_link(struct bnx2 *bp)
935 {
936         u32 fw_link_status = 0;
937
938         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
939                 return;
940
941         if (bp->link_up) {
942                 u32 bmsr;
943
944                 switch (bp->line_speed) {
945                 case SPEED_10:
946                         if (bp->duplex == DUPLEX_HALF)
947                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
948                         else
949                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
950                         break;
951                 case SPEED_100:
952                         if (bp->duplex == DUPLEX_HALF)
953                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
954                         else
955                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
956                         break;
957                 case SPEED_1000:
958                         if (bp->duplex == DUPLEX_HALF)
959                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
960                         else
961                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
962                         break;
963                 case SPEED_2500:
964                         if (bp->duplex == DUPLEX_HALF)
965                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
966                         else
967                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
968                         break;
969                 }
970
971                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
972
973                 if (bp->autoneg) {
974                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
975
976                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
977                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
978
979                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
980                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
981                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
982                         else
983                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
984                 }
985         }
986         else
987                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
988
989         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
990 }
991
992 static char *
993 bnx2_xceiver_str(struct bnx2 *bp)
994 {
995         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
996                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
997                  "Copper");
998 }
999
1000 static void
1001 bnx2_report_link(struct bnx2 *bp)
1002 {
1003         if (bp->link_up) {
1004                 netif_carrier_on(bp->dev);
1005                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1006                             bnx2_xceiver_str(bp),
1007                             bp->line_speed,
1008                             bp->duplex == DUPLEX_FULL ? "full" : "half");
1009
1010                 if (bp->flow_ctrl) {
1011                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
1012                                 pr_cont(", receive ");
1013                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
1014                                         pr_cont("& transmit ");
1015                         }
1016                         else {
1017                                 pr_cont(", transmit ");
1018                         }
1019                         pr_cont("flow control ON");
1020                 }
1021                 pr_cont("\n");
1022         } else {
1023                 netif_carrier_off(bp->dev);
1024                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1025                            bnx2_xceiver_str(bp));
1026         }
1027
1028         bnx2_report_fw_link(bp);
1029 }
1030
/* Resolve the RX/TX pause (flow control) state for the current link
 * and store it in bp->flow_ctrl.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Flow control was not fully autonegotiated: use the
		 * requested setting, but only on a full-duplex link.
		 */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause result
	 * directly in its 1000X status register; no table lookup needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Translate the 1000BASE-X pause advertisement bits into
		 * the copper-style PAUSE_CAP/PAUSE_ASYM bits so the
		 * resolution table below is shared by both PHY types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Local wants symmetric+asym, partner only
				 * asym: we receive pause only.
				 */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* We only send pause frames toward the partner. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1106
1107 static int
1108 bnx2_5709s_linkup(struct bnx2 *bp)
1109 {
1110         u32 val, speed;
1111
1112         bp->link_up = 1;
1113
1114         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1115         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1116         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1117
1118         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1119                 bp->line_speed = bp->req_line_speed;
1120                 bp->duplex = bp->req_duplex;
1121                 return 0;
1122         }
1123         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1124         switch (speed) {
1125                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1126                         bp->line_speed = SPEED_10;
1127                         break;
1128                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1129                         bp->line_speed = SPEED_100;
1130                         break;
1131                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1132                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1133                         bp->line_speed = SPEED_1000;
1134                         break;
1135                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1136                         bp->line_speed = SPEED_2500;
1137                         break;
1138         }
1139         if (val & MII_BNX2_GP_TOP_AN_FD)
1140                 bp->duplex = DUPLEX_FULL;
1141         else
1142                 bp->duplex = DUPLEX_HALF;
1143         return 0;
1144 }
1145
1146 static int
1147 bnx2_5708s_linkup(struct bnx2 *bp)
1148 {
1149         u32 val;
1150
1151         bp->link_up = 1;
1152         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1153         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1154                 case BCM5708S_1000X_STAT1_SPEED_10:
1155                         bp->line_speed = SPEED_10;
1156                         break;
1157                 case BCM5708S_1000X_STAT1_SPEED_100:
1158                         bp->line_speed = SPEED_100;
1159                         break;
1160                 case BCM5708S_1000X_STAT1_SPEED_1G:
1161                         bp->line_speed = SPEED_1000;
1162                         break;
1163                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1164                         bp->line_speed = SPEED_2500;
1165                         break;
1166         }
1167         if (val & BCM5708S_1000X_STAT1_FD)
1168                 bp->duplex = DUPLEX_FULL;
1169         else
1170                 bp->duplex = DUPLEX_HALF;
1171
1172         return 0;
1173 }
1174
1175 static int
1176 bnx2_5706s_linkup(struct bnx2 *bp)
1177 {
1178         u32 bmcr, local_adv, remote_adv, common;
1179
1180         bp->link_up = 1;
1181         bp->line_speed = SPEED_1000;
1182
1183         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1184         if (bmcr & BMCR_FULLDPLX) {
1185                 bp->duplex = DUPLEX_FULL;
1186         }
1187         else {
1188                 bp->duplex = DUPLEX_HALF;
1189         }
1190
1191         if (!(bmcr & BMCR_ANENABLE)) {
1192                 return 0;
1193         }
1194
1195         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1196         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1197
1198         common = local_adv & remote_adv;
1199         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1200
1201                 if (common & ADVERTISE_1000XFULL) {
1202                         bp->duplex = DUPLEX_FULL;
1203                 }
1204                 else {
1205                         bp->duplex = DUPLEX_HALF;
1206                 }
1207         }
1208
1209         return 0;
1210 }
1211
/* Determine line speed and duplex for a copper PHY link and record the
 * MDI-X state.  Assumes the link is up when called; may clear
 * bp->link_up if autoneg resolved no common mode.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T first. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link-partner 1000BASE-T ability bits in STAT1000
		 * sit two bit positions above the corresponding local
		 * advertisement bits in CTRL1000, hence the >> 2 to
		 * align them before AND-ing.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No 1000BASE-T; fall back to 10/100 resolution
			 * from the standard advertisement registers,
			 * highest common mode first.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: take the forced BMCR settings. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Record whether the PHY resolved to crossed (MDI-X)
		 * wiring.
		 */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1287
1288 static void
1289 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1290 {
1291         u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1292
1293         val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1294         val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1295         val |= 0x02 << 8;
1296
1297         if (bp->flow_ctrl & FLOW_CTRL_TX)
1298                 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1299
1300         bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1301 }
1302
1303 static void
1304 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1305 {
1306         int i;
1307         u32 cid;
1308
1309         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1310                 if (i == 1)
1311                         cid = RX_RSS_CID;
1312                 bnx2_init_rx_context(bp, cid);
1313         }
1314 }
1315
/* Program the EMAC for the current link state: inter-frame gap, port
 * mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.  Called after
 * link resolution.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX inter-frame gap; 1000 Mbps half duplex needs a
	 * larger value (0x26ff).
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no 10M MII mode; fall through
				 * and use plain MII there.
				 */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII plus the 25G mode bit. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* RX contexts carry the TX flow-control enable; refresh them. */
	bnx2_init_all_rx_contexts(bp);
}
1382
1383 static void
1384 bnx2_enable_bmsr1(struct bnx2 *bp)
1385 {
1386         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1387             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1388                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1389                                MII_BNX2_BLK_ADDR_GP_STATUS);
1390 }
1391
1392 static void
1393 bnx2_disable_bmsr1(struct bnx2 *bp)
1394 {
1395         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1396             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1397                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1398                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1399 }
1400
1401 static int
1402 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1403 {
1404         u32 up1;
1405         int ret = 1;
1406
1407         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1408                 return 0;
1409
1410         if (bp->autoneg & AUTONEG_SPEED)
1411                 bp->advertising |= ADVERTISED_2500baseX_Full;
1412
1413         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1414                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1415
1416         bnx2_read_phy(bp, bp->mii_up1, &up1);
1417         if (!(up1 & BCM5708S_UP1_2G5)) {
1418                 up1 |= BCM5708S_UP1_2G5;
1419                 bnx2_write_phy(bp, bp->mii_up1, up1);
1420                 ret = 0;
1421         }
1422
1423         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1424                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1425                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1426
1427         return ret;
1428 }
1429
1430 static int
1431 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1432 {
1433         u32 up1;
1434         int ret = 0;
1435
1436         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1437                 return 0;
1438
1439         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1440                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1441
1442         bnx2_read_phy(bp, bp->mii_up1, &up1);
1443         if (up1 & BCM5708S_UP1_2G5) {
1444                 up1 &= ~BCM5708S_UP1_2G5;
1445                 bnx2_write_phy(bp, bp->mii_up1, up1);
1446                 ret = 1;
1447         }
1448
1449         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1450                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1451                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1452
1453         return ret;
1454 }
1455
/* Force the SerDes PHY to 2.5 Gbps (chip-specific mechanism on the
 * 5709 and 5708; a no-op on other chips or non-2.5G-capable PHYs).
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Program force-2.5G in the SERDES_DIG block, then
		 * return to the combo IEEE block for the BMCR access.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		/* The 5708 has a dedicated force-2500 bit in BMCR. */
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Forcing a speed: turn autoneg off and apply the
		 * requested duplex.
		 */
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1499
/* Undo bnx2_enable_forced_2g5(): clear the forced 2.5 Gbps mode and,
 * if autonegotiating, restart autoneg at 1000 Mbps.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block, then
		 * return to the combo IEEE block for the BMCR access.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		/* The 5708 uses a dedicated force-2500 bit in BMCR. */
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* bmcr is only valid when the read above succeeded. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1538
1539 static void
1540 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1541 {
1542         u32 val;
1543
1544         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1545         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1546         if (start)
1547                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1548         else
1549                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1550 }
1551
/* Central link-state update: read the PHY, resolve speed/duplex/flow
 * control, reprogram the MAC, and report any link change.  Caller holds
 * bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY configurations are handled by the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR bits are latched; read twice to get the current state.
	 * enable/disable_bmsr1 switch the 5709 SerDes register block
	 * around the reads.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: derive link state from the
		 * EMAC status and the AN debug shadow register instead
		 * of trusting BMSR alone.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode and re-enable
		 * autoneg if we had fallen back to parallel detect.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1635
1636 static int
1637 bnx2_reset_phy(struct bnx2 *bp)
1638 {
1639         int i;
1640         u32 reg;
1641
1642         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1643
1644 #define PHY_RESET_MAX_WAIT 100
1645         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1646                 udelay(10);
1647
1648                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1649                 if (!(reg & BMCR_RESET)) {
1650                         udelay(20);
1651                         break;
1652                 }
1653         }
1654         if (i == PHY_RESET_MAX_WAIT) {
1655                 return -EBUSY;
1656         }
1657         return 0;
1658 }
1659
1660 static u32
1661 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1662 {
1663         u32 adv = 0;
1664
1665         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1666                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1667
1668                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1669                         adv = ADVERTISE_1000XPAUSE;
1670                 }
1671                 else {
1672                         adv = ADVERTISE_PAUSE_CAP;
1673                 }
1674         }
1675         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1676                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1677                         adv = ADVERTISE_1000XPSE_ASYM;
1678                 }
1679                 else {
1680                         adv = ADVERTISE_PAUSE_ASYM;
1681                 }
1682         }
1683         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1684                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1685                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1686                 }
1687                 else {
1688                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1689                 }
1690         }
1691         return adv;
1692 }
1693
1694 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1695
1696 static int
1697 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1698 __releases(&bp->phy_lock)
1699 __acquires(&bp->phy_lock)
1700 {
1701         u32 speed_arg = 0, pause_adv;
1702
1703         pause_adv = bnx2_phy_get_pause_adv(bp);
1704
1705         if (bp->autoneg & AUTONEG_SPEED) {
1706                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1707                 if (bp->advertising & ADVERTISED_10baseT_Half)
1708                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1709                 if (bp->advertising & ADVERTISED_10baseT_Full)
1710                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1711                 if (bp->advertising & ADVERTISED_100baseT_Half)
1712                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1713                 if (bp->advertising & ADVERTISED_100baseT_Full)
1714                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1715                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1716                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1717                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1718                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1719         } else {
1720                 if (bp->req_line_speed == SPEED_2500)
1721                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1722                 else if (bp->req_line_speed == SPEED_1000)
1723                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1724                 else if (bp->req_line_speed == SPEED_100) {
1725                         if (bp->req_duplex == DUPLEX_FULL)
1726                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1727                         else
1728                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1729                 } else if (bp->req_line_speed == SPEED_10) {
1730                         if (bp->req_duplex == DUPLEX_FULL)
1731                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1732                         else
1733                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1734                 }
1735         }
1736
1737         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1738                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1739         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1740                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1741
1742         if (port == PORT_TP)
1743                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1744                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1745
1746         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1747
1748         spin_unlock_bh(&bp->phy_lock);
1749         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1750         spin_lock_bh(&bp->phy_lock);
1751
1752         return 0;
1753 }
1754
/* Set up a directly-managed SerDes PHY from bp->autoneg,
 * bp->req_line_speed/req_duplex and bp->advertising.  Delegates to
 * bnx2_setup_remote_phy() when the bootcode owns the PHY.  Called with
 * phy_lock held; the lock may be dropped and re-taken around the
 * msleep().  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability needs a link bounce so the
		 * partner notices the change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* clear the forced-2.5G speed-select bit */
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				/* NOTE(review): restarts from bmcr, discarding
				 * the ANENABLE/SPEED1000 edits made above -
				 * long-standing upstream behavior; confirm
				 * intentional before changing.
				 */
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Momentarily advertise nothing and restart
				 * autoneg so the partner sees the link drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Reprogram only if the advertisement changed or autoneg is off. */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1871
/* Advertisement mask helpers.  ETHTOOL_ALL_FIBRE_SPEED expands to a
 * conditional expression reading the local variable "bp", so it may only
 * be used where a struct bnx2 *bp is in scope.  The whole expansion is
 * parenthesized so that uses such as
 * "ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg" group as intended;
 * without the outer parentheses the "| ADVERTISED_Autoneg" binds only to
 * the else-arm of the ternary, silently dropping the Autoneg bit on
 * 2.5G-capable ports.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

#define ETHTOOL_ALL_COPPER_SPEED                                        \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bit groups managed by this driver. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1886
1887 static void
1888 bnx2_set_default_remote_link(struct bnx2 *bp)
1889 {
1890         u32 link;
1891
1892         if (bp->phy_port == PORT_TP)
1893                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1894         else
1895                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1896
1897         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1898                 bp->req_line_speed = 0;
1899                 bp->autoneg |= AUTONEG_SPEED;
1900                 bp->advertising = ADVERTISED_Autoneg;
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1902                         bp->advertising |= ADVERTISED_10baseT_Half;
1903                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1904                         bp->advertising |= ADVERTISED_10baseT_Full;
1905                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1906                         bp->advertising |= ADVERTISED_100baseT_Half;
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1908                         bp->advertising |= ADVERTISED_100baseT_Full;
1909                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1910                         bp->advertising |= ADVERTISED_1000baseT_Full;
1911                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1912                         bp->advertising |= ADVERTISED_2500baseX_Full;
1913         } else {
1914                 bp->autoneg = 0;
1915                 bp->advertising = 0;
1916                 bp->req_duplex = DUPLEX_FULL;
1917                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1918                         bp->req_line_speed = SPEED_10;
1919                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1920                                 bp->req_duplex = DUPLEX_HALF;
1921                 }
1922                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1923                         bp->req_line_speed = SPEED_100;
1924                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1925                                 bp->req_duplex = DUPLEX_HALF;
1926                 }
1927                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1928                         bp->req_line_speed = SPEED_1000;
1929                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1930                         bp->req_line_speed = SPEED_2500;
1931         }
1932 }
1933
1934 static void
1935 bnx2_set_default_link(struct bnx2 *bp)
1936 {
1937         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1938                 bnx2_set_default_remote_link(bp);
1939                 return;
1940         }
1941
1942         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1943         bp->req_line_speed = 0;
1944         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1945                 u32 reg;
1946
1947                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1948
1949                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1950                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1951                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1952                         bp->autoneg = 0;
1953                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1954                         bp->req_duplex = DUPLEX_FULL;
1955                 }
1956         } else
1957                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1958 }
1959
1960 static void
1961 bnx2_send_heart_beat(struct bnx2 *bp)
1962 {
1963         u32 msg;
1964         u32 addr;
1965
1966         spin_lock(&bp->indirect_lock);
1967         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1968         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1969         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1970         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1971         spin_unlock(&bp->indirect_lock);
1972 }
1973
/* Handle a link-status event from the bootcode-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl and bp->phy_port, then
 * reprograms the MAC and reports any link-state change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this flag when it wants a driver pulse. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets the duplex and then falls through to
		 * the matching speed assignment below.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: if speed+flow-control autoneg is not fully
		 * enabled, apply the requested (forced) setting on full
		 * duplex; otherwise take the negotiated result reported by
		 * the firmware.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media may have switched; re-derive defaults if so. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2054
2055 static int
2056 bnx2_set_remote_link(struct bnx2 *bp)
2057 {
2058         u32 evt_code;
2059
2060         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2061         switch (evt_code) {
2062                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2063                         bnx2_remote_phy_event(bp);
2064                         break;
2065                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2066                 default:
2067                         bnx2_send_heart_beat(bp);
2068                         break;
2069         }
2070         return 0;
2071 }
2072
/* Set up a directly-managed copper PHY from bp->autoneg,
 * bp->advertising and bp->req_line_speed/req_duplex.  Called with
 * phy_lock held; the lock may be dropped and re-taken around the
 * msleep().  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the bits we manage when comparing old vs. new adv. */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Rewrite and restart autoneg only if something changed
		 * or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed path below. */

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2164
2165 static int
2166 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2167 __releases(&bp->phy_lock)
2168 __acquires(&bp->phy_lock)
2169 {
2170         if (bp->loopback == MAC_LOOPBACK)
2171                 return 0;
2172
2173         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2174                 return bnx2_setup_serdes_phy(bp, port);
2175         }
2176         else {
2177                 return bnx2_setup_copper_phy(bp);
2178         }
2179 }
2180
/* Initialize the 5709 integrated SerDes PHY.  This PHY exposes its
 * IEEE-compatible registers through banked blocks at offset 0x10, so
 * the driver's mii_* register offsets are redirected first.
 * Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE-compatible registers live at base + 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode and disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2230
/* Initialize the 5708 external SerDes PHY (BCM5708S): fiber mode with
 * signal auto-detect, optional 2.5G advertisement, an early-revision TX
 * amplitude workaround, and board-specific TX control from NVRAM on
 * backplane designs.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with signal auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Workaround for early 5708 revisions only. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from the NVRAM port
	 * config, but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2288
/* Initialize the 5706 SerDes PHY.  Registers 0x18/0x1c are vendor
 * shadow registers; the constants written here are Broadcom-specified
 * magic values.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2326
/* Initialize a copper PHY: apply errata workarounds, program packet
 * length handling for the current MTU, and enable ethernet@wirespeed
 * (plus auto-MDIX on the 5709).  Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor-specified DSP write sequence (CRC errata workaround);
	 * the register/value pairs are opaque Broadcom magic.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2384
2385
2386 static int
2387 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2388 __releases(&bp->phy_lock)
2389 __acquires(&bp->phy_lock)
2390 {
2391         u32 val;
2392         int rc = 0;
2393
2394         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2395         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2396
2397         bp->mii_bmcr = MII_BMCR;
2398         bp->mii_bmsr = MII_BMSR;
2399         bp->mii_bmsr1 = MII_BMSR;
2400         bp->mii_adv = MII_ADVERTISE;
2401         bp->mii_lpa = MII_LPA;
2402
2403         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2404
2405         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2406                 goto setup_phy;
2407
2408         bnx2_read_phy(bp, MII_PHYSID1, &val);
2409         bp->phy_id = val << 16;
2410         bnx2_read_phy(bp, MII_PHYSID2, &val);
2411         bp->phy_id |= val & 0xffff;
2412
2413         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2414                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2415                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2416                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2417                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2418                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2419                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2420         }
2421         else {
2422                 rc = bnx2_init_copper_phy(bp, reset_phy);
2423         }
2424
2425 setup_phy:
2426         if (!rc)
2427                 rc = bnx2_setup_phy(bp, bp->phy_port);
2428
2429         return rc;
2430 }
2431
2432 static int
2433 bnx2_set_mac_loopback(struct bnx2 *bp)
2434 {
2435         u32 mac_mode;
2436
2437         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2438         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2439         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2440         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441         bp->link_up = 1;
2442         return 0;
2443 }
2444
2445 static int bnx2_test_link(struct bnx2 *);
2446
2447 static int
2448 bnx2_set_phy_loopback(struct bnx2 *bp)
2449 {
2450         u32 mac_mode;
2451         int rc, i;
2452
2453         spin_lock_bh(&bp->phy_lock);
2454         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2455                             BMCR_SPEED1000);
2456         spin_unlock_bh(&bp->phy_lock);
2457         if (rc)
2458                 return rc;
2459
2460         for (i = 0; i < 10; i++) {
2461                 if (bnx2_test_link(bp) == 0)
2462                         break;
2463                 msleep(100);
2464         }
2465
2466         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2467         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2468                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2469                       BNX2_EMAC_MODE_25G_MODE);
2470
2471         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2472         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2473         bp->link_up = 1;
2474         return 0;
2475 }
2476
/* Dump bootcode (MCP) CPU registers and shared-memory state to the
 * kernel log.  Used when a firmware handshake fails, to aid post-mortem
 * debugging.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers moved on the 5709. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* The program counter is sampled twice, presumably to show whether
	 * the MCP CPU is still advancing - TODO confirm.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2519
/* Send a message to the boot-code firmware through the shared-memory
 * driver mailbox and, if @ack is set, wait for the firmware to echo
 * the sequence number back.
 *
 * @msg_data: message code/data; a rolling sequence number is OR'ed in.
 * @ack:      when zero, fire-and-forget (no wait, always returns 0).
 * @silent:   when set, suppress the log message and MCP state dump on
 *            timeout.
 *
 * Returns 0 on success, -EBUSY on ack timeout (after informing the
 * firmware of the timeout), or -EIO if the firmware reported a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number and remember it
	 * for later reference.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number in the ACK field. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not checked for ack/status. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2566
/* Initialize the 5709's host-memory context: trigger the chip's
 * internal context memory init, then register every host context page
 * in the chip's page table, polling each write request until the
 * hardware accepts it.
 *
 * Returns 0 on success, -EBUSY if the chip does not complete an
 * operation in time, or -ENOMEM if a context page was never
 * allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size (log2, relative to 256 bytes). */
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the internal memory init to complete (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Every context page must exist; clear it before use. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Point page table entry i at this page's DMA address
		 * (low word + valid bit, then high word), then issue
		 * the write request.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the write request is consumed (up to ~50us). */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2614
/* Zero the on-chip context memory for all 96 connection IDs (CIDs) on
 * chips that keep context on-chip (the 5709 registers host pages
 * instead; see bnx2_init_5709_context()).  On 5706 A0 silicon some
 * CIDs are remapped to different physical context addresses — this
 * looks like a silicon erratum workaround.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* Remap the upper half of each 16-CID group. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A CID's context may span multiple physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2657
2658 static int
2659 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2660 {
2661         u16 *good_mbuf;
2662         u32 good_mbuf_cnt;
2663         u32 val;
2664
2665         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2666         if (good_mbuf == NULL)
2667                 return -ENOMEM;
2668
2669         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2670                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2671
2672         good_mbuf_cnt = 0;
2673
2674         /* Allocate a bunch of mbufs and save the good ones in an array. */
2675         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2676         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2677                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2678                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2679
2680                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2681
2682                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2683
2684                 /* The addresses with Bit 9 set are bad memory blocks. */
2685                 if (!(val & (1 << 9))) {
2686                         good_mbuf[good_mbuf_cnt] = (u16) val;
2687                         good_mbuf_cnt++;
2688                 }
2689
2690                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2691         }
2692
2693         /* Free the good ones back to the mbuf pool thus discarding
2694          * all the bad ones. */
2695         while (good_mbuf_cnt) {
2696                 good_mbuf_cnt--;
2697
2698                 val = good_mbuf[good_mbuf_cnt];
2699                 val = (val << 9) | val | 1;
2700
2701                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2702         }
2703         kfree(good_mbuf);
2704         return 0;
2705 }
2706
2707 static void
2708 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2709 {
2710         u32 val;
2711
2712         val = (mac_addr[0] << 8) | mac_addr[1];
2713
2714         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2715
2716         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2717                 (mac_addr[4] << 8) | mac_addr[5];
2718
2719         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2720 }
2721
2722 static inline int
2723 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2724 {
2725         dma_addr_t mapping;
2726         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2727         struct bnx2_rx_bd *rxbd =
2728                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2729         struct page *page = alloc_page(gfp);
2730
2731         if (!page)
2732                 return -ENOMEM;
2733         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2734                                PCI_DMA_FROMDEVICE);
2735         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2736                 __free_page(page);
2737                 return -EIO;
2738         }
2739
2740         rx_pg->page = page;
2741         dma_unmap_addr_set(rx_pg, mapping, mapping);
2742         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2743         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2744         return 0;
2745 }
2746
2747 static void
2748 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2749 {
2750         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2751         struct page *page = rx_pg->page;
2752
2753         if (!page)
2754                 return;
2755
2756         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2757                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2758
2759         __free_page(page);
2760         rx_pg->page = NULL;
2761 }
2762
2763 static inline int
2764 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2765 {
2766         u8 *data;
2767         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2768         dma_addr_t mapping;
2769         struct bnx2_rx_bd *rxbd =
2770                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2771
2772         data = kmalloc(bp->rx_buf_size, gfp);
2773         if (!data)
2774                 return -ENOMEM;
2775
2776         mapping = dma_map_single(&bp->pdev->dev,
2777                                  get_l2_fhdr(data),
2778                                  bp->rx_buf_use_size,
2779                                  PCI_DMA_FROMDEVICE);
2780         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2781                 kfree(data);
2782                 return -EIO;
2783         }
2784
2785         rx_buf->data = data;
2786         dma_unmap_addr_set(rx_buf, mapping, mapping);
2787
2788         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2789         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2790
2791         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2792
2793         return 0;
2794 }
2795
2796 static int
2797 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2798 {
2799         struct status_block *sblk = bnapi->status_blk.msi;
2800         u32 new_link_state, old_link_state;
2801         int is_set = 1;
2802
2803         new_link_state = sblk->status_attn_bits & event;
2804         old_link_state = sblk->status_attn_bits_ack & event;
2805         if (new_link_state != old_link_state) {
2806                 if (new_link_state)
2807                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2808                 else
2809                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2810         } else
2811                 is_set = 0;
2812
2813         return is_set;
2814 }
2815
2816 static void
2817 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2818 {
2819         spin_lock(&bp->phy_lock);
2820
2821         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2822                 bnx2_set_link(bp);
2823         if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2824                 bnx2_set_remote_link(bp);
2825
2826         spin_unlock(&bp->phy_lock);
2827
2828 }
2829
2830 static inline u16
2831 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2832 {
2833         u16 cons;
2834
2835         cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2836
2837         if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2838                 cons++;
2839         return cons;
2840 }
2841
/* TX completion handler, called from NAPI poll.  Walks the TX ring
 * from the driver's consumer index toward the hardware consumer
 * index, unmapping DMA buffers and freeing completed skbs, then
 * updates the BQL accounting and wakes the TX queue if it was stopped
 * and enough descriptors are now free.
 *
 * Returns the number of packets reclaimed (at most @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of the last BD of this GSO packet; skip
			 * the ring's final slot if it would land there.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the hardware has not consumed every BD
			 * of this packet yet (wrap-safe signed compare).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap the fragment BDs that follow the head BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up completions that arrived while we worked. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the queue lock to avoid racing with a
		 * concurrent stop in the transmit path.
		 */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2936
/* Return @count pages from the RX page ring's consumer side back to
 * the producer side so the hardware can use them again (called on
 * error and allocation-failure paths).  When @skb is non-NULL, its
 * last page fragment is first detached and recycled into the ring and
 * the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the descriptor
		 * address from the consumer slot to the producer slot
		 * unless they coincide.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2994
/* Recycle the RX buffer @data from consumer slot @cons back to
 * producer slot @prod so the hardware can reuse it.  The header area
 * is synced back to the device, the producer byte sequence is
 * advanced, and the DMA mapping plus descriptor address are copied
 * over when the two slots differ.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Return CPU ownership of the header region to the device. */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3024
/* Build an skb for a received packet.
 *
 * @data:     raw RX buffer (contains the l2_fhdr and the packet head)
 * @len:      packet length (the caller has already subtracted the
 *            4-byte CRC)
 * @hdr_len:  when non-zero, the packet was split by the chip: the
 *            first @hdr_len bytes are in @data and the remainder
 *            lives in page-ring pages attached as skb frags
 * @dma_addr: DMA mapping of @data
 * @ring_idx: producer index in the low 16 bits, consumer index in the
 *            high 16 bits
 *
 * A replacement buffer is allocated first; on any failure the buffer
 * (and any page frags) are recycled back to the rings and NULL is
 * returned.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	/* Refill the ring slot before taking ownership of @data. */
	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* For split packets, also recycle the page frags.  The
		 * + 4 restores the CRC bytes the caller removed.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Skip the l2_fhdr and the RX offset padding. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		/* Non-split packet: everything is in the head buffer. */
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Bytes remaining in the page ring (incl. the CRC). */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* The last fragment held only (part of) the CRC:
			 * trim the excess off the skb and recycle the
			 * remaining pages instead of attaching them.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3129
3130 static inline u16
3131 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3132 {
3133         u16 cons;
3134
3135         cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3136
3137         if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3138                 cons++;
3139         return cons;
3140 }
3141
/* RX completion handler, called from NAPI poll.  Processes up to
 * @budget received packets: validates the l2_fhdr status, copies
 * small packets into a fresh skb (recycling the original buffer),
 * wraps larger packets around the buffer via bnx2_rx_skb() (possibly
 * with page-ring frags), applies VLAN / checksum / RX-hash metadata,
 * and hands each packet to GRO.  Finally publishes the new producer
 * indices and byte sequence to the hardware.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Take CPU ownership of the header region only. */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Prefetch the next packet's header for the next pass. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Split packets carry the header length in the ip_xsum
		 * field; jumbo packets are split at rx_jumbo_thresh.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames; recycle buffer and any pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte CRC from the reported length. */
		len -= 4;

		/* Small packets: copy into a fresh skb and recycle the
		 * original buffer in place.
		 */
		if (len <= bp->rx_copy_thresh) {
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (skb == NULL) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN tagged
		 * (0x8100 == ETH_P_8021Q).
		 */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new indices to the chip. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3308
3309 /* MSI ISR - The only difference between this and the INTx ISR
3310  * is that the MSI interrupt is always serviced.
3311  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	/* Warm the status block cache line before NAPI reads it. */
	prefetch(bnapi->status_blk.msi);
	/* Mask further HC interrupts; the NAPI poll re-enables them when
	 * it completes.
	 */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3331
/* One-shot MSI ISR: the chip auto-masks after firing, so unlike bnx2_msi()
 * no explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3348
/* INTx (possibly shared) interrupt handler.  Verifies the interrupt really
 * belongs to this device before masking and scheduling NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further HC interrupts until NAPI poll finishes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		/* Snapshot the index we are servicing; bnx2_poll() uses it
		 * to detect new status block updates.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3387
3388 static inline int
3389 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3390 {
3391         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3392         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3393
3394         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3395             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3396                 return 1;
3397         return 0;
3398 }
3399
3400 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3401                                  STATUS_ATTN_BITS_TIMER_ABORT)
3402
3403 static inline int
3404 bnx2_has_work(struct bnx2_napi *bnapi)
3405 {
3406         struct status_block *sblk = bnapi->status_blk.msi;
3407
3408         if (bnx2_has_fast_work(bnapi))
3409                 return 1;
3410
3411 #ifdef BCM_CNIC
3412         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3413                 return 1;
3414 #endif
3415
3416         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3417             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3418                 return 1;
3419
3420         return 0;
3421 }
3422
/* Workaround for occasionally lost MSIs: called periodically (presumably
 * from the driver timer -- caller not visible here).  If work has been
 * pending across two consecutive checks with no status block progress,
 * bounce the MSI enable bit and invoke the ISR by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Status index unchanged since the previous idle check:
		 * the MSI was probably missed.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on, then service manually. */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3444
3445 #ifdef BCM_CNIC
/* Hand the status block to the CNIC (iSCSI/RDMA offload) driver, if one is
 * registered.  cnic_ops is RCU-protected because the CNIC module can
 * unregister at any time.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		/* cnic_tag records the status index CNIC has consumed;
		 * bnx2_has_work() compares it against status_idx.
		 */
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
3460 #endif
3461
/* Service link/timer attention events from the status block.  Any event
 * bit that differs from its ack copy is unhandled.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back to flush the posted write. */
		BNX2_RD(bp, BNX2_HC_COMMAND);
	}
}
3481
/* Do one round of fast-path work: reap completed TX, then receive up to
 * the remaining NAPI budget.  Returns the updated work_done count; only
 * RX packets count against the budget (TX reclaim is unbounded -- the 0
 * passed to bnx2_tx_int appears to be an unused/zero budget argument;
 * confirm against bnx2_tx_int's definition).
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3496
/* NAPI poll routine for the extra MSI-X vectors (fast path only; link and
 * CNIC events are handled by the base vector in bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete_done(napi, work_done);
			/* Ack up to last_status_idx and re-enable this
			 * vector's interrupt.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3523
/* Main NAPI poll routine (INTx/MSI and MSI-X base vector): handles link
 * attention, fast-path RX/TX, and CNIC, then re-enables interrupts when
 * there is no more work.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			/* MSI/MSI-X: a single ack write re-enables. */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the line still masked first, then
			 * unmask with a second write.
			 */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3572
3573 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3574  * from set_multicast.
3575  */
/* Program the EMAC RX mode and RPM sorter from dev->flags and the
 * unicast/multicast address lists (ndo_set_rx_mode).
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with the bits we recompute cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
	/* Without VLAN RX acceleration, keep the tag in the frame (when the
	 * chip/firmware combination allows it).
	 */
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash filter with ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one bit of the 256-bit filter:
		 * low CRC byte selects register (top 3 bits) and bit
		 * position (low 5 bits).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the exact-match slots: fall back
	 * to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sorter. */
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3661
3662 static int
3663 check_fw_section(const struct firmware *fw,
3664                  const struct bnx2_fw_file_section *section,
3665                  u32 alignment, bool non_empty)
3666 {
3667         u32 offset = be32_to_cpu(section->offset);
3668         u32 len = be32_to_cpu(section->len);
3669
3670         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3671                 return -EINVAL;
3672         if ((non_empty && len == 0) || len > fw->size - offset ||
3673             len & (alignment - 1))
3674                 return -EINVAL;
3675         return 0;
3676 }
3677
3678 static int
3679 check_mips_fw_entry(const struct firmware *fw,
3680                     const struct bnx2_mips_fw_file_entry *entry)
3681 {
3682         if (check_fw_section(fw, &entry->text, 4, true) ||
3683             check_fw_section(fw, &entry->data, 4, false) ||
3684             check_fw_section(fw, &entry->rodata, 4, false))
3685                 return -EINVAL;
3686         return 0;
3687 }
3688
3689 static void bnx2_release_firmware(struct bnx2 *bp)
3690 {
3691         if (bp->rv2p_firmware) {
3692                 release_firmware(bp->mips_firmware);
3693                 release_firmware(bp->rv2p_firmware);
3694                 bp->rv2p_firmware = NULL;
3695         }
3696 }
3697
/* Request and validate the MIPS and RV2P firmware images for this chip.
 * On success both bp->mips_firmware and bp->rv2p_firmware are populated;
 * on failure everything acquired so far is released and an errno returned.
 */
static int bnx2_request_uncached_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image names by chip family (5709 A0/A1 need a special
	 * RV2P build).
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		goto out;
	}

	rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		goto err_release_mips_firmware;
	}
	/* Sanity-check every section before anything is written to the
	 * chip in bnx2_init_cpus().
	 */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		rc = -EINVAL;
		goto err_release_firmware;
	}
out:
	return rc;

err_release_firmware:
	/* NULL rv2p_firmware so bnx2_release_firmware() won't double-free. */
	release_firmware(bp->rv2p_firmware);
	bp->rv2p_firmware = NULL;
err_release_mips_firmware:
	release_firmware(bp->mips_firmware);
	goto out;
}
3757
3758 static int bnx2_request_firmware(struct bnx2 *bp)
3759 {
3760         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3761 }
3762
3763 static u32
3764 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3765 {
3766         switch (idx) {
3767         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3768                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3769                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3770                 break;
3771         }
3772         return rv2p_code;
3773 }
3774
/* Load one RV2P (receive) processor's firmware, apply the fixup table,
 * and leave the processor in reset (it is un-stalled later).  The section
 * was validated by bnx2_request_uncached_firmware(); always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: write high and low words, then
	 * commit them to instruction slot i/8 via the address/command reg.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		BNX2_WR(bp, addr, val);
	}

	/* Re-walk the image and rewrite the instructions named in the
	 * 8-entry fixup table (e.g. BD page size patching).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3834
3835 static int
3836 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3837             const struct bnx2_mips_fw_file_entry *fw_entry)
3838 {
3839         u32 addr, len, file_offset;
3840         __be32 *data;
3841         u32 offset;
3842         u32 val;
3843
3844         /* Halt the CPU. */
3845         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3846         val |= cpu_reg->mode_value_halt;
3847         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3848         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3849
3850         /* Load the Text area. */
3851         addr = be32_to_cpu(fw_entry->text.addr);
3852         len = be32_to_cpu(fw_entry->text.len);
3853         file_offset = be32_to_cpu(fw_entry->text.offset);
3854         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3855
3856         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3857         if (len) {
3858                 int j;
3859
3860                 for (j = 0; j < (len / 4); j++, offset += 4)
3861                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3862         }
3863
3864         /* Load the Data area. */
3865         addr = be32_to_cpu(fw_entry->data.addr);
3866         len = be32_to_cpu(fw_entry->data.len);
3867         file_offset = be32_to_cpu(fw_entry->data.offset);
3868         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3869
3870         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3871         if (len) {
3872                 int j;
3873
3874                 for (j = 0; j < (len / 4); j++, offset += 4)
3875                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3876         }
3877
3878         /* Load the Read-Only area. */
3879         addr = be32_to_cpu(fw_entry->rodata.addr);
3880         len = be32_to_cpu(fw_entry->rodata.len);
3881         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3882         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3883
3884         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3885         if (len) {
3886                 int j;
3887
3888                 for (j = 0; j < (len / 4); j++, offset += 4)
3889                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3890         }
3891
3892         /* Clear the pre-fetch instruction. */
3893         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3894
3895         val = be32_to_cpu(fw_entry->start_addr);
3896         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3897
3898         /* Start the CPU. */
3899         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3900         val &= ~cpu_reg->mode_value_halt;
3901         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3902         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3903
3904         return 0;
3905 }
3906
/* Load firmware into all on-chip processors (RV2P plus the five MIPS
 * CPUs).  The images were validated at request time.  Returns 0 or the
 * first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3946
/* Configure the chip for Wake-on-LAN (or no-WoL suspend) and notify the
 * bootcode.  With WoL enabled, the PHY is forced to 10/100 autoneg on
 * copper, the MAC is set up to recognize magic/ACPI packets, and the
 * receive path is left enabled for broadcast/multicast.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Temporarily override autoneg/advertising to low-speed
		 * settings for the WoL link, then restore the user values.
		 */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		/* Enable magic packet / ACPI pattern wakeup detection. */
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Disable, reprogram, then re-enable the RPM sorter for
		 * broadcast + multicast only.
		 */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4041
/* Transition the device between PCI power states.  Only D0 and D3hot are
 * supported; anything else returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Undo the WoL MAC configuration from bnx2_setup_wol(). */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1 erratum: only enter D3hot when WoL is armed. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4097
4098 static int
4099 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4100 {
4101         u32 val;
4102         int j;
4103
4104         /* Request access to the flash interface. */
4105         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4106         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4107                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4108                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4109                         break;
4110
4111                 udelay(5);
4112         }
4113
4114         if (j >= NVRAM_TIMEOUT_COUNT)
4115                 return -EBUSY;
4116
4117         return 0;
4118 }
4119
4120 static int
4121 bnx2_release_nvram_lock(struct bnx2 *bp)
4122 {
4123         int j;
4124         u32 val;
4125
4126         /* Relinquish nvram interface. */
4127         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4128
4129         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4130                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4131                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4132                         break;
4133
4134                 udelay(5);
4135         }
4136
4137         if (j >= NVRAM_TIMEOUT_COUNT)
4138                 return -EBUSY;
4139
4140         return 0;
4141 }
4142
4143
4144 static int
4145 bnx2_enable_nvram_write(struct bnx2 *bp)
4146 {
4147         u32 val;
4148
4149         val = BNX2_RD(bp, BNX2_MISC_CFG);
4150         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4151
4152         if (bp->flash_info->flags & BNX2_NV_WREN) {
4153                 int j;
4154
4155                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4156                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4157                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4158
4159                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4160                         udelay(5);
4161
4162                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4163                         if (val & BNX2_NVM_COMMAND_DONE)
4164                                 break;
4165                 }
4166
4167                 if (j >= NVRAM_TIMEOUT_COUNT)
4168                         return -EBUSY;
4169         }
4170         return 0;
4171 }
4172
4173 static void
4174 bnx2_disable_nvram_write(struct bnx2 *bp)
4175 {
4176         u32 val;
4177
4178         val = BNX2_RD(bp, BNX2_MISC_CFG);
4179         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4180 }
4181
4182
4183 static void
4184 bnx2_enable_nvram_access(struct bnx2 *bp)
4185 {
4186         u32 val;
4187
4188         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4189         /* Enable both bits, even on read. */
4190         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4191                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4192 }
4193
4194 static void
4195 bnx2_disable_nvram_access(struct bnx2 *bp)
4196 {
4197         u32 val;
4198
4199         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4200         /* Disable both bits, even after read. */
4201         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4202                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4203                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4204 }
4205
4206 static int
4207 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4208 {
4209         u32 cmd;
4210         int j;
4211
4212         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4213                 /* Buffered flash, no erase needed */
4214                 return 0;
4215
4216         /* Build an erase command */
4217         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4218               BNX2_NVM_COMMAND_DOIT;
4219
4220         /* Need to clear DONE bit separately. */
4221         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4222
4223         /* Address of the NVRAM to read from. */
4224         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4225
4226         /* Issue an erase command. */
4227         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4228
4229         /* Wait for completion. */
4230         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4231                 u32 val;
4232
4233                 udelay(5);
4234
4235                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4236                 if (val & BNX2_NVM_COMMAND_DONE)
4237                         break;
4238         }
4239
4240         if (j >= NVRAM_TIMEOUT_COUNT)
4241                 return -EBUSY;
4242
4243         return 0;
4244 }
4245
4246 static int
4247 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4248 {
4249         u32 cmd;
4250         int j;
4251
4252         /* Build the command word. */
4253         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4254
4255         /* Calculate an offset of a buffered flash, not needed for 5709. */
4256         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4257                 offset = ((offset / bp->flash_info->page_size) <<
4258                            bp->flash_info->page_bits) +
4259                           (offset % bp->flash_info->page_size);
4260         }
4261
4262         /* Need to clear DONE bit separately. */
4263         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4264
4265         /* Address of the NVRAM to read from. */
4266         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4267
4268         /* Issue a read command. */
4269         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4270
4271         /* Wait for completion. */
4272         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4273                 u32 val;
4274
4275                 udelay(5);
4276
4277                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4278                 if (val & BNX2_NVM_COMMAND_DONE) {
4279                         __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4280                         memcpy(ret_val, &v, 4);
4281                         break;
4282                 }
4283         }
4284         if (j >= NVRAM_TIMEOUT_COUNT)
4285                 return -EBUSY;
4286
4287         return 0;
4288 }
4289
4290
4291 static int
4292 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4293 {
4294         u32 cmd;
4295         __be32 val32;
4296         int j;
4297
4298         /* Build the command word. */
4299         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4300
4301         /* Calculate an offset of a buffered flash, not needed for 5709. */
4302         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4303                 offset = ((offset / bp->flash_info->page_size) <<
4304                           bp->flash_info->page_bits) +
4305                          (offset % bp->flash_info->page_size);
4306         }
4307
4308         /* Need to clear DONE bit separately. */
4309         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4310
4311         memcpy(&val32, val, 4);
4312
4313         /* Write the data. */
4314         BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4315
4316         /* Address of the NVRAM to write to. */
4317         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4318
4319         /* Issue the write command. */
4320         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4321
4322         /* Wait for completion. */
4323         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4324                 udelay(5);
4325
4326                 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4327                         break;
4328         }
4329         if (j >= NVRAM_TIMEOUT_COUNT)
4330                 return -EBUSY;
4331
4332         return 0;
4333 }
4334
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info and bp->flash_size.  On the 5709 the part is fixed;
 * otherwise it is matched against flash_table using strapping bits
 * read from NVM_CFG1.  Returns 0 or a negative errno. */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits only. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping field to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* j == entry_count means neither loop matched a table entry. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size advertised in shared memory; fall back
	 * to the table's total size when that field is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4417
4418 static int
4419 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4420                 int buf_size)
4421 {
4422         int rc = 0;
4423         u32 cmd_flags, offset32, len32, extra;
4424
4425         if (buf_size == 0)
4426                 return 0;
4427
4428         /* Request access to the flash interface. */
4429         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4430                 return rc;
4431
4432         /* Enable access to flash interface */
4433         bnx2_enable_nvram_access(bp);
4434
4435         len32 = buf_size;
4436         offset32 = offset;
4437         extra = 0;
4438
4439         cmd_flags = 0;
4440
4441         if (offset32 & 3) {
4442                 u8 buf[4];
4443                 u32 pre_len;
4444
4445                 offset32 &= ~3;
4446                 pre_len = 4 - (offset & 3);
4447
4448                 if (pre_len >= len32) {
4449                         pre_len = len32;
4450                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4451                                     BNX2_NVM_COMMAND_LAST;
4452                 }
4453                 else {
4454                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4455                 }
4456
4457                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4458
4459                 if (rc)
4460                         return rc;
4461
4462                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4463
4464                 offset32 += 4;
4465                 ret_buf += pre_len;
4466                 len32 -= pre_len;
4467         }
4468         if (len32 & 3) {
4469                 extra = 4 - (len32 & 3);
4470                 len32 = (len32 + 4) & ~3;
4471         }
4472
4473         if (len32 == 4) {
4474                 u8 buf[4];
4475
4476                 if (cmd_flags)
4477                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4478                 else
4479                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4480                                     BNX2_NVM_COMMAND_LAST;
4481
4482                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4483
4484                 memcpy(ret_buf, buf, 4 - extra);
4485         }
4486         else if (len32 > 0) {
4487                 u8 buf[4];
4488
4489                 /* Read the first word. */
4490                 if (cmd_flags)
4491                         cmd_flags = 0;
4492                 else
4493                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4494
4495                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4496
4497                 /* Advance to the next dword. */
4498                 offset32 += 4;
4499                 ret_buf += 4;
4500                 len32 -= 4;
4501
4502                 while (len32 > 4 && rc == 0) {
4503                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4504
4505                         /* Advance to the next dword. */
4506                         offset32 += 4;
4507                         ret_buf += 4;
4508                         len32 -= 4;
4509                 }
4510
4511                 if (rc)
4512                         return rc;
4513
4514                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4515                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4516
4517                 memcpy(ret_buf, buf, 4 - extra);
4518         }
4519
4520         /* Disable access to flash interface */
4521         bnx2_disable_nvram_access(bp);
4522
4523         bnx2_release_nvram_lock(bp);
4524
4525         return rc;
4526 }
4527
4528 static int
4529 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4530                 int buf_size)
4531 {
4532         u32 written, offset32, len32;
4533         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4534         int rc = 0;
4535         int align_start, align_end;
4536
4537         buf = data_buf;
4538         offset32 = offset;
4539         len32 = buf_size;
4540         align_start = align_end = 0;
4541
4542         if ((align_start = (offset32 & 3))) {
4543                 offset32 &= ~3;
4544                 len32 += align_start;
4545                 if (len32 < 4)
4546                         len32 = 4;
4547                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4548                         return rc;
4549         }
4550
4551         if (len32 & 3) {
4552                 align_end = 4 - (len32 & 3);
4553                 len32 += align_end;
4554                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4555                         return rc;
4556         }
4557
4558         if (align_start || align_end) {
4559                 align_buf = kmalloc(len32, GFP_KERNEL);
4560                 if (align_buf == NULL)
4561                         return -ENOMEM;
4562                 if (align_start) {
4563                         memcpy(align_buf, start, 4);
4564                 }
4565                 if (align_end) {
4566                         memcpy(align_buf + len32 - 4, end, 4);
4567                 }
4568                 memcpy(align_buf + align_start, data_buf, buf_size);
4569                 buf = align_buf;
4570         }
4571
4572         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4573                 flash_buffer = kmalloc(264, GFP_KERNEL);
4574                 if (flash_buffer == NULL) {
4575                         rc = -ENOMEM;
4576                         goto nvram_write_end;
4577                 }
4578         }
4579
4580         written = 0;
4581         while ((written < len32) && (rc == 0)) {
4582                 u32 page_start, page_end, data_start, data_end;
4583                 u32 addr, cmd_flags;
4584                 int i;
4585
4586                 /* Find the page_start addr */
4587                 page_start = offset32 + written;
4588                 page_start -= (page_start % bp->flash_info->page_size);
4589                 /* Find the page_end addr */
4590                 page_end = page_start + bp->flash_info->page_size;
4591                 /* Find the data_start addr */
4592                 data_start = (written == 0) ? offset32 : page_start;
4593                 /* Find the data_end addr */
4594                 data_end = (page_end > offset32 + len32) ?
4595                         (offset32 + len32) : page_end;
4596
4597                 /* Request access to the flash interface. */
4598                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4599                         goto nvram_write_end;
4600
4601                 /* Enable access to flash interface */
4602                 bnx2_enable_nvram_access(bp);
4603
4604                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4605                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4606                         int j;
4607
4608                         /* Read the whole page into the buffer
4609                          * (non-buffer flash only) */
4610                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4611                                 if (j == (bp->flash_info->page_size - 4)) {
4612                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4613                                 }
4614                                 rc = bnx2_nvram_read_dword(bp,
4615                                         page_start + j,
4616                                         &flash_buffer[j],
4617                                         cmd_flags);
4618
4619                                 if (rc)
4620                                         goto nvram_write_end;
4621
4622                                 cmd_flags = 0;
4623                         }
4624                 }
4625
4626                 /* Enable writes to flash interface (unlock write-protect) */
4627                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4628                         goto nvram_write_end;
4629
4630                 /* Loop to write back the buffer data from page_start to
4631                  * data_start */
4632                 i = 0;
4633                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4634                         /* Erase the page */
4635                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4636                                 goto nvram_write_end;
4637
4638                         /* Re-enable the write again for the actual write */
4639                         bnx2_enable_nvram_write(bp);
4640
4641                         for (addr = page_start; addr < data_start;
4642                                 addr += 4, i += 4) {
4643
4644                                 rc = bnx2_nvram_write_dword(bp, addr,
4645                                         &flash_buffer[i], cmd_flags);
4646
4647                                 if (rc != 0)
4648                                         goto nvram_write_end;
4649
4650                                 cmd_flags = 0;
4651                         }
4652                 }
4653
4654                 /* Loop to write the new data from data_start to data_end */
4655                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4656                         if ((addr == page_end - 4) ||
4657                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4658                                  (addr == data_end - 4))) {
4659
4660                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4661                         }
4662                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4663                                 cmd_flags);
4664
4665                         if (rc != 0)
4666                                 goto nvram_write_end;
4667
4668                         cmd_flags = 0;
4669                         buf += 4;
4670                 }
4671
4672                 /* Loop to write back the buffer data from data_end
4673                  * to page_end */
4674                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4675                         for (addr = data_end; addr < page_end;
4676                                 addr += 4, i += 4) {
4677
4678                                 if (addr == page_end-4) {
4679                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4680                                 }
4681                                 rc = bnx2_nvram_write_dword(bp, addr,
4682                                         &flash_buffer[i], cmd_flags);
4683
4684                                 if (rc != 0)
4685                                         goto nvram_write_end;
4686
4687                                 cmd_flags = 0;
4688                         }
4689                 }
4690
4691                 /* Disable writes to flash interface (lock write-protect) */
4692                 bnx2_disable_nvram_write(bp);
4693
4694                 /* Disable access to flash interface */
4695                 bnx2_disable_nvram_access(bp);
4696                 bnx2_release_nvram_lock(bp);
4697
4698                 /* Increment written */
4699                 written += data_end - data_start;
4700         }
4701
4702 nvram_write_end:
4703         kfree(flash_buffer);
4704         kfree(align_buf);
4705         return rc;
4706 }
4707
4708 static void
4709 bnx2_init_fw_cap(struct bnx2 *bp)
4710 {
4711         u32 val, sig = 0;
4712
4713         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4714         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4715
4716         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4717                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4718
4719         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4720         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4721                 return;
4722
4723         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4724                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4725                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4726         }
4727
4728         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4729             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4730                 u32 link;
4731
4732                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4733
4734                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4735                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4736                         bp->phy_port = PORT_FIBRE;
4737                 else
4738                         bp->phy_port = PORT_TP;
4739
4740                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4741                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4742         }
4743
4744         if (netif_running(bp->dev) && sig)
4745                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4746 }
4747
/* Map the chip's MSI-X table and PBA through GRC windows 2 and 3 so
 * they are reachable via the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch GRC windowing to separate-window mode first. */
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4756
4757 static void
4758 bnx2_wait_dma_complete(struct bnx2 *bp)
4759 {
4760         u32 val;
4761         int i;
4762
4763         /*
4764          * Wait for the current PCI transaction to complete before
4765          * issuing a reset.
4766          */
4767         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4768             (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4769                 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4770                         BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4771                         BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4772                         BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4773                         BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4774                 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4775                 udelay(5);
4776         } else {  /* 5709 */
4777                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4778                 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4779                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4780                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4781
4782                 for (i = 0; i < 100; i++) {
4783                         msleep(1);
4784                         val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4785                         if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4786                                 break;
4787                 }
4788         }
4789
4790         return;
4791 }
4792
4793
/* Perform a soft reset of the chip after handshaking with the
 * firmware; reset_code is passed through in the pre/post reset
 * firmware sync messages.  Returns 0 or a negative errno. */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets through the MISC command register. */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset through the PCICFG misc config
		 * register's core-reset request bit. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities under the PHY lock; if the
	 * remote-PHY port type changed across the reset, reprogram the
	 * default link configuration. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4897
/* One-time chip initialization after reset: program DMA/byte-swap config,
 * contexts, firmware CPUs, MQ windows, MTU, status/statistics block DMA
 * addresses and all host-coalescing parameters, then hand off to the
 * bootcode with a RESET fw_sync and enable the default engine blocks.
 *
 * Returns 0 on success or a negative errno from context/CPU init or the
 * firmware handshake.  The register write ordering below is deliberate;
 * do not reorder.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA engine configuration: swap modes plus read/write channel
	 * counts packed into bits 12..15 and 16..19.
	 */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented magic bits in DMA_CONFIG — kept as-is;
	 * meaning not derivable from this file.
	 */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* 5706 (except A0) in non-PCI-X mode gets ping-pong DMA. */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
	    !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	BNX2_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, disable relaxed ordering in the PCI-X command word. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	/* Enable the blocks needed before context init can proceed. */
	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* MQ (mailbox queue) config: 256-byte kernel-bypass blocks; the
	 * 5709 additionally needs binary MQ mode (and HALT_DIS on Ax).
	 */
	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	BNX2_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BNX2_PAGE_BITS - 8) << 24;
	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff from the MAC address so that NICs on
	 * the same wire back off differently.
	 */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are sized for at least a standard frame. */
	if (mtu < ETH_DATA_LEN)
		mtu = ETH_DATA_LEN;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the shared status block and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	/* Set up how to generate a link change interrupt. */
	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
		(u64) bp->status_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
		(u64) bp->stats_blk_mapping & 0xffffffff);
	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
		(u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the in-interrupt value in
	 * the high 16 bits and the normal value in the low 16 bits.
	 */
	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	BNX2_WR(bp, BNX2_HC_COM_TICKS,
		(bp->com_ticks_int << 16) | bp->com_ticks);

	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
			BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	BNX2_WR(bp, BNX2_HC_CONFIG, val);

	/* Tell firmware whether we expect low-latency RX coalescing. */
	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	/* Per-MSI-X-vector status block configs (vector 0 was done above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		BNX2_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
			(bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the bootcode we have finished reset processing. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5126
5127 static void
5128 bnx2_clear_ring_states(struct bnx2 *bp)
5129 {
5130         struct bnx2_napi *bnapi;
5131         struct bnx2_tx_ring_info *txr;
5132         struct bnx2_rx_ring_info *rxr;
5133         int i;
5134
5135         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5136                 bnapi = &bp->bnx2_napi[i];
5137                 txr = &bnapi->tx_ring;
5138                 rxr = &bnapi->rx_ring;
5139
5140                 txr->tx_cons = 0;
5141                 txr->hw_tx_cons = 0;
5142                 rxr->rx_prod_bseq = 0;
5143                 rxr->rx_prod = 0;
5144                 rxr->rx_cons = 0;
5145                 rxr->rx_pg_prod = 0;
5146                 rxr->rx_pg_cons = 0;
5147         }
5148 }
5149
5150 static void
5151 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5152 {
5153         u32 val, offset0, offset1, offset2, offset3;
5154         u32 cid_addr = GET_CID_ADDR(cid);
5155
5156         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5157                 offset0 = BNX2_L2CTX_TYPE_XI;
5158                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5159                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5160                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5161         } else {
5162                 offset0 = BNX2_L2CTX_TYPE;
5163                 offset1 = BNX2_L2CTX_CMD_TYPE;
5164                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5165                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5166         }
5167         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5168         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5169
5170         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5171         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5172
5173         val = (u64) txr->tx_desc_mapping >> 32;
5174         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5175
5176         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5177         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5178 }
5179
5180 static void
5181 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5182 {
5183         struct bnx2_tx_bd *txbd;
5184         u32 cid = TX_CID;
5185         struct bnx2_napi *bnapi;
5186         struct bnx2_tx_ring_info *txr;
5187
5188         bnapi = &bp->bnx2_napi[ring_num];
5189         txr = &bnapi->tx_ring;
5190
5191         if (ring_num == 0)
5192                 cid = TX_CID;
5193         else
5194                 cid = TX_TSS_CID + ring_num - 1;
5195
5196         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5197
5198         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5199
5200         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5201         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5202
5203         txr->tx_prod = 0;
5204         txr->tx_prod_bseq = 0;
5205
5206         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5207         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5208
5209         bnx2_init_tx_context(bp, cid, txr);
5210 }
5211
5212 static void
5213 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5214                      u32 buf_size, int num_rings)
5215 {
5216         int i;
5217         struct bnx2_rx_bd *rxbd;
5218
5219         for (i = 0; i < num_rings; i++) {
5220                 int j;
5221
5222                 rxbd = &rx_ring[i][0];
5223                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5224                         rxbd->rx_bd_len = buf_size;
5225                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5226                 }
5227                 if (i == (num_rings - 1))
5228                         j = 0;
5229                 else
5230                         j = i + 1;
5231                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5232                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5233         }
5234 }
5235
/* Set up RX ring @ring_num: initialize the BD pages, program the RX L2
 * context (including the optional jumbo page ring), pre-fill the rings
 * with receive buffers, and write the initial producer indices to the
 * chip's mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX CID; extra (RSS) rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	/* NOTE(review): MQ_MAP_L2_5 arming appears 5709-specific; exact
	 * semantics are not derivable from this file.
	 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page (jumbo) ring; overwritten below if enabled. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Bus address of the first page-BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first normal BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning (allocation pressure at open time).
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path to publish new buffers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Hand the initial producer indices to the chip. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5321
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS: number of extra rings and the base TSS CID. */
	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Fill the RSS indirection table: eight 4-bit ring
		 * selectors are packed per 32-bit word, spreading entries
		 * round-robin over the non-default RX rings; each full
		 * word is written out through the RLUP command register.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Hash on all supported IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5368
5369 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5370 {
5371         u32 max, num_rings = 1;
5372
5373         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5374                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5375                 num_rings++;
5376         }
5377         /* round to next power of 2 */
5378         max = max_size;
5379         while ((max & num_rings) == 0)
5380                 max >>= 1;
5381
5382         if (num_rings != max)
5383                 max <<= 1;
5384
5385         return max;
5386 }
5387
/* Compute all RX buffer and ring sizing derived from the current MTU and
 * the requested ring size @size: per-buffer sizes, copy/jumbo thresholds,
 * and the number of BD pages for both the normal and the page (jumbo)
 * rings.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total space one receive buffer occupies, including the alignment
	 * slack and the build_skb() shared-info overhead.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	/* When a buffer would no longer fit in one page (and the chip's
	 * jumbo path works), switch to header-in-buffer + data-in-pages
	 * mode and size the page ring accordingly.
	 */
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the "- 40" presumably discounts header
		 * bytes kept in the first buffer — confirm against the RX
		 * path before relying on it.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		/* First buffer only holds up to the copy threshold. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5428
/* Free every skb still queued on the TX rings and unmap its DMA
 * mappings.  Used on shutdown/reset paths after the chip has been
 * quiesced.  Note: @j is advanced inside the body (head BD, then one BD
 * per fragment), so the outer for-loop has no increment expression.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated — nothing to free on this vector. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			/* Empty slot: step to the next BD. */
			if (skb == NULL) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Head BD maps the linear part of the skb. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* One additional BD per page fragment. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Reset BQL accounting for this queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5473
/* Free all receive buffers (and jumbo pages) still held by the RX
 * rings, unmapping their DMA mappings first.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this returns from the whole function (not
		 * `continue`) at the first ring without a buffer ring —
		 * presumably safe because rings are allocated in order, so
		 * later rings cannot be populated either; confirm against
		 * the allocation path before changing.
		 */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (data == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		/* Release any pages on the jumbo page ring. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5507
/* Release all buffers queued on both the TX and RX rings.  Called on
 * shutdown/reset paths once the chip is quiesced.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5514
5515 static int
5516 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5517 {
5518         int rc;
5519
5520         rc = bnx2_reset_chip(bp, reset_code);
5521         bnx2_free_skbs(bp);
5522         if (rc)
5523                 return rc;
5524
5525         if ((rc = bnx2_init_chip(bp)) != 0)
5526                 return rc;
5527
5528         bnx2_init_all_rings(bp);
5529         return 0;
5530 }
5531
5532 static int
5533 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5534 {
5535         int rc;
5536
5537         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5538                 return rc;
5539
5540         spin_lock_bh(&bp->phy_lock);
5541         bnx2_init_phy(bp, reset_phy);
5542         bnx2_set_link(bp);
5543         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5544                 bnx2_remote_phy_event(bp);
5545         spin_unlock_bh(&bp->phy_lock);
5546         return 0;
5547 }
5548
5549 static int
5550 bnx2_shutdown_chip(struct bnx2 *bp)
5551 {
5552         u32 reset_code;
5553
5554         if (bp->flags & BNX2_FLAG_NO_WOL)
5555                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5556         else if (bp->wol)
5557                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5558         else
5559                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5560
5561         return bnx2_reset_chip(bp, reset_code);
5562 }
5563
/* Ethtool register self-test.  For each entry in the table below, the
 * read/write mask bits must read back as written (both all-zeros and
 * all-ones patterns) and the read-only mask bits must be unaffected by
 * writes.  The original register value is restored in every case.
 *
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* Table of registers to exercise: offset, flags, writable-bit mask
	 * and read-only-bit mask.  Terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: writable bits must read back zero and
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: writable bits must read back set and
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5734
5735 static int
5736 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5737 {
5738         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5739                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5740         int i;
5741
5742         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5743                 u32 offset;
5744
5745                 for (offset = 0; offset < size; offset += 4) {
5746
5747                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5748
5749                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5750                                 test_pattern[i]) {
5751                                 return -ENODEV;
5752                         }
5753                 }
5754         }
5755         return 0;
5756 }
5757
5758 static int
5759 bnx2_test_memory(struct bnx2 *bp)
5760 {
5761         int ret = 0;
5762         int i;
5763         static struct mem_entry {
5764                 u32   offset;
5765                 u32   len;
5766         } mem_tbl_5706[] = {
5767                 { 0x60000,  0x4000 },
5768                 { 0xa0000,  0x3000 },
5769                 { 0xe0000,  0x4000 },
5770                 { 0x120000, 0x4000 },
5771                 { 0x1a0000, 0x4000 },
5772                 { 0x160000, 0x4000 },
5773                 { 0xffffffff, 0    },
5774         },
5775         mem_tbl_5709[] = {
5776                 { 0x60000,  0x4000 },
5777                 { 0xa0000,  0x3000 },
5778                 { 0xe0000,  0x4000 },
5779                 { 0x120000, 0x4000 },
5780                 { 0x1a0000, 0x4000 },
5781                 { 0xffffffff, 0    },
5782         };
5783         struct mem_entry *mem_tbl;
5784
5785         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5786                 mem_tbl = mem_tbl_5709;
5787         else
5788                 mem_tbl = mem_tbl_5706;
5789
5790         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5791                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5792                         mem_tbl[i].len)) != 0) {
5793                         return ret;
5794                 }
5795         }
5796
5797         return ret;
5798 }
5799
5800 #define BNX2_MAC_LOOPBACK       0
5801 #define BNX2_PHY_LOOPBACK       1
5802
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	/* Self-test helper: transmit one self-addressed frame with the MAC
	 * (or PHY) looped back and verify it arrives intact on the first
	 * RX ring.  Returns 0 if the frame round-trips correctly, -ENODEV
	 * on any mismatch, -EINVAL for an unknown mode, -ENOMEM/-EIO on
	 * allocation or DMA-mapping failure.  Called from the ethtool
	 * self-test path with the NIC quiesced.
	 */
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	u8 *data;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	/* NOTE(review): these re-assign the same values as the
	 * initializers above — redundant but harmless. */
	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be exercised when the PHY is managed
		 * remotely; report success without testing. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, eight zero
	 * bytes, then an incrementing byte pattern as payload. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce event so the status block reflects the current
	 * RX consumer index before we transmit. */
	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Fill one TX descriptor for the whole frame and ring the
	 * doorbell (producer index + byte sequence). */
	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then coalesce again
	 * to refresh the status block. */
	udelay(100);

	BNX2_WR(bp, BNX2_HC_COMMAND,
		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	BNX2_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX side must have consumed exactly what we produced. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly one new frame must have arrived on the RX ring. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	data = rx_buf->data;

	rx_hdr = get_l2_fhdr(data);
	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;

	dma_sync_single_for_cpu(&bp->pdev->dev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match after subtracting the 4-byte frame CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5935
5936 #define BNX2_MAC_LOOPBACK_FAILED        1
5937 #define BNX2_PHY_LOOPBACK_FAILED        2
5938 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5939                                          BNX2_PHY_LOOPBACK_FAILED)
5940
5941 static int
5942 bnx2_test_loopback(struct bnx2 *bp)
5943 {
5944         int rc = 0;
5945
5946         if (!netif_running(bp->dev))
5947                 return BNX2_LOOPBACK_FAILED;
5948
5949         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5950         spin_lock_bh(&bp->phy_lock);
5951         bnx2_init_phy(bp, 1);
5952         spin_unlock_bh(&bp->phy_lock);
5953         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5954                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5955         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5956                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5957         return rc;
5958 }
5959
5960 #define NVRAM_SIZE 0x200
5961 #define CRC32_RESIDUAL 0xdebb20e3
5962
5963 static int
5964 bnx2_test_nvram(struct bnx2 *bp)
5965 {
5966         __be32 buf[NVRAM_SIZE / 4];
5967         u8 *data = (u8 *) buf;
5968         int rc = 0;
5969         u32 magic, csum;
5970
5971         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5972                 goto test_nvram_done;
5973
5974         magic = be32_to_cpu(buf[0]);
5975         if (magic != 0x669955aa) {
5976                 rc = -ENODEV;
5977                 goto test_nvram_done;
5978         }
5979
5980         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5981                 goto test_nvram_done;
5982
5983         csum = ether_crc_le(0x100, data);
5984         if (csum != CRC32_RESIDUAL) {
5985                 rc = -ENODEV;
5986                 goto test_nvram_done;
5987         }
5988
5989         csum = ether_crc_le(0x100, data + 0x100);
5990         if (csum != CRC32_RESIDUAL) {
5991                 rc = -ENODEV;
5992         }
5993
5994 test_nvram_done:
5995         return rc;
5996 }
5997
5998 static int
5999 bnx2_test_link(struct bnx2 *bp)
6000 {
6001         u32 bmsr;
6002
6003         if (!netif_running(bp->dev))
6004                 return -ENODEV;
6005
6006         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6007                 if (bp->link_up)
6008                         return 0;
6009                 return -ENODEV;
6010         }
6011         spin_lock_bh(&bp->phy_lock);
6012         bnx2_enable_bmsr1(bp);
6013         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6014         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6015         bnx2_disable_bmsr1(bp);
6016         spin_unlock_bh(&bp->phy_lock);
6017
6018         if (bmsr & BMSR_LSTATUS) {
6019                 return 0;
6020         }
6021         return -ENODEV;
6022 }
6023
6024 static int
6025 bnx2_test_intr(struct bnx2 *bp)
6026 {
6027         int i;
6028         u16 status_idx;
6029
6030         if (!netif_running(bp->dev))
6031                 return -ENODEV;
6032
6033         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6034
6035         /* This register is not touched during run-time. */
6036         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6037         BNX2_RD(bp, BNX2_HC_COMMAND);
6038
6039         for (i = 0; i < 10; i++) {
6040                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6041                         status_idx) {
6042
6043                         break;
6044                 }
6045
6046                 msleep_interruptible(10);
6047         }
6048         if (i < 10)
6049                 return 0;
6050
6051         return -ENODEV;
6052 }
6053
/* Determining link for parallel detection: returns 1 when a
 * non-autonegotiating link partner appears to be present (signal
 * detected, PHY in sync, and no autoneg CONFIG words being received),
 * 0 otherwise.  Caller holds bp->phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Signal detect must be asserted for anything else to matter. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice — presumably to clear latched status so the second
	 * read reflects the current state; confirm against PHY docs. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
6085
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	/* Periodic link maintenance for the 5706 SerDes PHY (called from
	 * bnx2_timer()).  Implements parallel detection: force the link
	 * to 1G full duplex when a non-autoneg partner is present, and
	 * return to autoneg once the partner starts autonegotiating.
	 */
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* An autoneg attempt is still pending; count down and
		 * skip the link check this tick. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* No link with autoneg on: if the partner looks
			 * like a forced-mode device, force 1G/FD too. */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detection; if the partner now
		 * autonegs (bit 0x20 of shadow register 0x15 —
		 * NOTE(review): magic register numbers from vendor code),
		 * switch back to autonegotiation. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG — presumably so the second read
		 * returns the current (non-latched) sync state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link is reported up but the PHY lost sync:
			 * force it down once, then let bnx2_set_link()
			 * re-evaluate on the next tick. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6147
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Periodic link maintenance for the 5708 SerDes PHY: while the
	 * link is down with autoneg requested, alternate between forced
	 * 2.5G and autoneg until one of them brings the link up.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't produced a link: try forced 2.5G
			 * and re-check on a shorter interval. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G didn't work either: fall back to
			 * autoneg and give it two timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6180
static void
bnx2_timer(unsigned long data)
{
	/* Periodic maintenance timer (re-arms itself every
	 * bp->current_interval): firmware heartbeat, missed-MSI check,
	 * stats workarounds and SerDes link polling.
	 */
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupt handling is held off (e.g. reset in progress);
	 * just re-arm and try again next tick. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot MSI) needs the missed-MSI
	 * workaround check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Firmware-side RX drop counter is only visible via an indirect
	 * register read; mirror it into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6216
6217 static int
6218 bnx2_request_irq(struct bnx2 *bp)
6219 {
6220         unsigned long flags;
6221         struct bnx2_irq *irq;
6222         int rc = 0, i;
6223
6224         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6225                 flags = 0;
6226         else
6227                 flags = IRQF_SHARED;
6228
6229         for (i = 0; i < bp->irq_nvecs; i++) {
6230                 irq = &bp->irq_tbl[i];
6231                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6232                                  &bp->bnx2_napi[i]);
6233                 if (rc)
6234                         break;
6235                 irq->requested = 1;
6236         }
6237         return rc;
6238 }
6239
6240 static void
6241 __bnx2_free_irq(struct bnx2 *bp)
6242 {
6243         struct bnx2_irq *irq;
6244         int i;
6245
6246         for (i = 0; i < bp->irq_nvecs; i++) {
6247                 irq = &bp->irq_tbl[i];
6248                 if (irq->requested)
6249                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6250                 irq->requested = 0;
6251         }
6252 }
6253
static void
bnx2_free_irq(struct bnx2 *bp)
{
	/* Free all requested vectors, then tear down MSI/MSI-X (in that
	 * order) and clear the interrupt-mode flags, reverting the
	 * device to INTx. */
	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6266
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	/* Try to switch the device to MSI-X with up to msix_vecs vectors
	 * (plus one extra for CNIC when built in).  On success sets
	 * BNX2_FLAG_USING_MSIX / ONE_SHOT_MSI and fills bp->irq_tbl; on
	 * failure returns silently, leaving the caller's INTx setup.
	 */
	int i, total_vecs;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Point the chip's MSI-X table and PBA at GRC windows. */
	bnx2_setup_msix_tbl(bp);
	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	/* One extra vector is reserved for the CNIC driver. */
	total_vecs++;
#endif
	/* May grant fewer vectors than asked, down to BNX2_MIN_MSIX_VEC;
	 * negative return means MSI-X could not be enabled at all. */
	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6310
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	/* Select the interrupt mode (MSI-X, MSI or INTx) and derive the
	 * TX/RX ring counts.  Starts from a safe single-vector INTx
	 * setup and upgrades when the hardware supports it and dis_msi
	 * is clear.  Returns netif_set_real_num_rx_queues()'s result.
	 */
	int cpus = netif_get_num_default_rss_queues();
	int msix_vecs;

	/* Desired vector count from the requested ring counts; a count
	 * of 0 means "not requested" and falls back to the CPU count. */
	if (!bp->num_req_rx_rings)
		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
	else if (!bp->num_req_tx_rings)
		msix_vecs = max(cpus, bp->num_req_rx_rings);
	else
		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);

	msix_vecs = min(msix_vecs, RX_MAX_RINGS);

	/* Default: single INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* If MSI-X wasn't enabled above, fall back to plain MSI. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
				/* Only the 5709 gets one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count is rounded down to a power of two — presumably
	 * required by the TX queue selection logic; confirm. */
	if (!bp->num_req_tx_rings)
		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	else
		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);

	if (!bp->num_req_rx_rings)
		bp->num_rx_rings = bp->irq_nvecs;
	else
		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);

	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}
6362
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	/* ndo_open: load firmware, choose the interrupt mode, allocate
	 * memory and IRQs, bring up the NIC, verify that MSI actually
	 * delivers interrupts (falling back to INTx if not) and start
	 * the TX queues.  Returns 0 or a negative errno; on error all
	 * resources acquired here are released.
	 */
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	rc = bnx2_request_firmware(bp);
	if (rc < 0)
		goto out;

	netif_carrier_off(dev);

	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx (dis_msi = 1) and reinitialize. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);
out:
	return rc;

open_err:
	/* Unwind everything acquired above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bnx2_release_firmware(bp);
	goto out;
}
6445
static void
bnx2_reset_task(struct work_struct *work)
{
	/* Workqueue handler (scheduled from bnx2_tx_timeout()): stop the
	 * netif, recover PCI config state if the PCI block was reset,
	 * and reinitialize the NIC.  Closes the device on failure.
	 */
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
	int rc;
	u16 pcicmd;

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	/* A cleared memory-enable bit indicates the PCI block lost its
	 * configuration; restore the saved state and re-save it. */
	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		/* in case PCI block has reset */
		pci_restore_state(bp->pdev);
		pci_save_state(bp->pdev);
	}
	rc = bnx2_init_nic(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "failed to reset NIC, closing\n");
		bnx2_napi_enable(bp);
		dev_close(bp->dev);
		rtnl_unlock();
		return;
	}

	/* Hold off interrupt processing until it is re-enabled —
	 * presumably cleared by the restart path; confirm. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6480
6481 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6482
static void
bnx2_dump_ftq(struct bnx2 *bp)
{
	/* Dump the firmware task-queue control registers, per-CPU state
	 * and the TBDC CAM to the kernel log; called on TX timeout. */
	int i;
	u32 reg, bdidx, cid, valid;
	struct net_device *dev = bp->dev;
	static const struct ftq_reg {
		char *name;
		u32 off;
	} ftq_arr[] = {
		BNX2_FTQ_ENTRY(RV2P_P),
		BNX2_FTQ_ENTRY(RV2P_T),
		BNX2_FTQ_ENTRY(RV2P_M),
		BNX2_FTQ_ENTRY(TBDR_),
		BNX2_FTQ_ENTRY(TDMA_),
		BNX2_FTQ_ENTRY(TXP_),
		/* NOTE(review): TXP_ appears twice — looks like a
		 * duplicate entry; confirm whether intentional. */
		BNX2_FTQ_ENTRY(TXP_),
		BNX2_FTQ_ENTRY(TPAT_),
		BNX2_FTQ_ENTRY(RXP_C),
		BNX2_FTQ_ENTRY(RXP_),
		BNX2_FTQ_ENTRY(COM_COMXQ_),
		BNX2_FTQ_ENTRY(COM_COMTQ_),
		BNX2_FTQ_ENTRY(COM_COMQ_),
		BNX2_FTQ_ENTRY(CP_CPQ_),
	};

	netdev_err(dev, "<--- start FTQ dump --->\n");
	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));

	netdev_err(dev, "CPU states:\n");
	/* NOTE(review): "pc" is read and printed twice from the same
	 * offset (reg + 0x1c) — presumably to show the program counter
	 * advancing between reads; confirm. */
	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
			   reg, bnx2_reg_rd_ind(bp, reg),
			   bnx2_reg_rd_ind(bp, reg + 4),
			   bnx2_reg_rd_ind(bp, reg + 8),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x1c),
			   bnx2_reg_rd_ind(bp, reg + 0x20));

	netdev_err(dev, "<--- end FTQ dump --->\n");
	netdev_err(dev, "<--- start TBDC dump --->\n");
	netdev_err(dev, "TBDC free cnt: %ld\n",
		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
	for (i = 0; i < 0x20; i++) {
		int j = 0;

		/* Select CAM line i, issue a read command, then poll for
		 * completion (bounded to 100 iterations). */
		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
			j++;

		cid = BNX2_RD(bp, BNX2_TBDC_CID);
		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
			   bdidx >> 24, (valid >> 8) & 0x0ff);
	}
	netdev_err(dev, "<--- end TBDC dump --->\n");
}
6549
static void
bnx2_dump_state(struct bnx2 *bp)
{
	/* Log a snapshot of PCI config and chip status registers to help
	 * diagnose a TX timeout; called from bnx2_tx_timeout(). */
	struct net_device *dev = bp->dev;
	u32 val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* Pending-bit array is only meaningful in MSI-X mode. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6573
static void
bnx2_tx_timeout(struct net_device *dev)
{
	/* ndo_tx_timeout: dump diagnostic state to the log, then
	 * schedule a full chip reset from process context. */
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_ftq(bp);
	bnx2_dump_state(bp);
	bnx2_dump_mcp_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6586
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Builds one TX buffer descriptor (BD) for the linear part of the skb
 * plus one BD per page fragment, then rings the TX mailbox doorbell.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct bnx2_tx_bd *txbd;
	struct bnx2_sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* The queue is stopped before the ring can fill, so seeing a
	 * full ring here indicates a driver bug.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
	}

	/* LSO (TSO): encode the MSS and header option lengths into the
	 * BD flags/mss fields for the firmware offload engine.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 LSO: any extension-header offset beyond the
			 * basic IPv6 header is split across bit fields in
			 * the flags word and the mss field.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO: IP options plus TCP options, counted in
			 * 32-bit words, go into bits 8+ of the flags word.
			 */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* DMA-map the linear header portion of the skb. */
	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One additional BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	/* Sync BD data before updating TX mailbox */
	wmb();

	netdev_tx_sent_queue(txq, skb->len);

	prod = BNX2_NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and cumulative byte count. */
	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = BNX2_TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = BNX2_NEXT_TX_BD(prod);
		ring_prod = BNX2_TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
6764
/* Called with rtnl_lock */
/* ndo_stop handler: quiesce interrupts, NAPI, and TX, stop the
 * periodic timer, reset the chip, then release IRQs, ring buffers,
 * and DMA memory.  The teardown order mirrors the bring-up order.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6784
6785 static void
6786 bnx2_save_stats(struct bnx2 *bp)
6787 {
6788         u32 *hw_stats = (u32 *) bp->stats_blk;
6789         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6790         int i;
6791
6792         /* The 1st 10 counters are 64-bit counters */
6793         for (i = 0; i < 20; i += 2) {
6794                 u32 hi;
6795                 u64 lo;
6796
6797                 hi = temp_stats[i] + hw_stats[i];
6798                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6799                 if (lo > 0xffffffff)
6800                         hi++;
6801                 temp_stats[i] = hi;
6802                 temp_stats[i + 1] = lo & 0xffffffff;
6803         }
6804
6805         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6806                 temp_stats[i] += hw_stats[i];
6807 }
6808
/* Combine the {hi, lo} 32-bit halves of a 64-bit hardware counter. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter from the live stats block and the saved copy
 * (temp_stats_blk holds counts accumulated before the last chip reset).
 */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Sum a 32-bit counter from the live stats block and the saved copy. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
6819
/* ndo_get_stats64 handler: translate the chip's MAC statistics block
 * (plus the pre-reset totals saved in temp_stats_blk) into the
 * standard rtnl_link_stats64 counters.
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* stats_blk is only allocated once the device has been opened. */
	if (bp->stats_blk == NULL)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	/* "Overrsize" is the field's actual (misspelled) name in the
	 * hardware statistics block - do not "fix" it.
	 */
	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported reliably on 5706 and
	 * 5708 A0 silicon.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6891
6892 /* All ethtool functions called with rtnl_lock */
6893
/* ethtool get_link_ksettings: report supported/advertised link modes
 * and the current speed/duplex.  The live PHY state is sampled under
 * phy_lock; speed/duplex are only meaningful while carrier is up.
 */
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	/* With a firmware-managed remote PHY both media types may be
	 * available; otherwise the fixed phy_port decides.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		/* MDI/MDI-X status only applies to copper PHYs. */
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6961
/* ethtool set_link_ksettings: validate and apply new autoneg/speed/
 * duplex settings.  All checks and state updates are done under
 * phy_lock; the staged values are only committed once validation
 * passes, so a failed request leaves the old settings intact.
 */
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Stage the new values in locals; commit only on success. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with a remote (firmware
	 * managed) PHY.
	 */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Clamp the advertisement mask to the selected medium;
		 * an empty mask means "advertise everything".
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		/* Forced speed: fibre allows only 1G/2.5G full duplex
		 * (2.5G only on capable PHYs); copper disallows those.
		 */
		if (cmd->base.port == PORT_FIBRE) {
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7043
/* ethtool get_drvinfo: report driver name/version, PCI bus address,
 * and the bootcode/firmware version string read at probe time.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
7054
/* Size of the register dump buffer returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: fixed-size register dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7062
/* ethtool get_regs: dump 32 KB of device register space into _p.
 * Only the ranges listed in reg_boundaries are safe to read; the gaps
 * between ranges are left zero-filled in the output buffer.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive {start, end} byte-offset pairs of readable
	 * register ranges, terminated at BNX2_REGDUMP_LEN (0x8000).
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		/* Reached the end of this range: jump the output
		 * pointer to the start of the next readable range.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7114
7115 static void
7116 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7117 {
7118         struct bnx2 *bp = netdev_priv(dev);
7119
7120         if (bp->flags & BNX2_FLAG_NO_WOL) {
7121                 wol->supported = 0;
7122                 wol->wolopts = 0;
7123         }
7124         else {
7125                 wol->supported = WAKE_MAGIC;
7126                 if (bp->wol)
7127                         wol->wolopts = WAKE_MAGIC;
7128                 else
7129                         wol->wolopts = 0;
7130         }
7131         memset(&wol->sopass, 0, sizeof(wol->sopass));
7132 }
7133
7134 static int
7135 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7136 {
7137         struct bnx2 *bp = netdev_priv(dev);
7138
7139         if (wol->wolopts & ~WAKE_MAGIC)
7140                 return -EINVAL;
7141
7142         if (wol->wolopts & WAKE_MAGIC) {
7143                 if (bp->flags & BNX2_FLAG_NO_WOL)
7144                         return -EINVAL;
7145
7146                 bp->wol = 1;
7147         }
7148         else {
7149                 bp->wol = 0;
7150         }
7151
7152         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7153
7154         return 0;
7155 }
7156
/* ethtool nway_reset: restart link autonegotiation.  Only valid when
 * the interface is up and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote (firmware-managed) PHY: ask the firmware to redo the
	 * link setup instead of touching MII registers directly.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be
		 * called with a BH-disabled spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation on the PHY. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7202
/* ethtool get_link: report the driver's cached link state, which is
 * kept up to date by the link interrupt / timer paths.
 */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7210
7211 static int
7212 bnx2_get_eeprom_len(struct net_device *dev)
7213 {
7214         struct bnx2 *bp = netdev_priv(dev);
7215
7216         if (bp->flash_info == NULL)
7217                 return 0;
7218
7219         return (int) bp->flash_size;
7220 }
7221
7222 static int
7223 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7224                 u8 *eebuf)
7225 {
7226         struct bnx2 *bp = netdev_priv(dev);
7227         int rc;
7228
7229         /* parameters already validated in ethtool_get_eeprom */
7230
7231         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7232
7233         return rc;
7234 }
7235
7236 static int
7237 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7238                 u8 *eebuf)
7239 {
7240         struct bnx2 *bp = netdev_priv(dev);
7241         int rc;
7242
7243         /* parameters already validated in ethtool_set_eeprom */
7244
7245         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7246
7247         return rc;
7248 }
7249
7250 static int
7251 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7252 {
7253         struct bnx2 *bp = netdev_priv(dev);
7254
7255         memset(coal, 0, sizeof(struct ethtool_coalesce));
7256
7257         coal->rx_coalesce_usecs = bp->rx_ticks;
7258         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7259         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7260         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7261
7262         coal->tx_coalesce_usecs = bp->tx_ticks;
7263         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7264         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7265         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7266
7267         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7268
7269         return 0;
7270 }
7271
7272 static int
7273 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7274 {
7275         struct bnx2 *bp = netdev_priv(dev);
7276
7277         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7278         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7279
7280         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7281         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7282
7283         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7284         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7285
7286         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7287         if (bp->rx_quick_cons_trip_int > 0xff)
7288                 bp->rx_quick_cons_trip_int = 0xff;
7289
7290         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7291         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7292
7293         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7294         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7295
7296         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7297         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7298
7299         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7300         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7301                 0xff;
7302
7303         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7304         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7305                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7306                         bp->stats_ticks = USEC_PER_SEC;
7307         }
7308         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7309                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7310         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7311
7312         if (netif_running(bp->dev)) {
7313                 bnx2_netif_stop(bp, true);
7314                 bnx2_init_nic(bp, 0);
7315                 bnx2_netif_start(bp, true);
7316         }
7317
7318         return 0;
7319 }
7320
/* ethtool get_ringparam: report current and maximum ring sizes.  The
 * "jumbo" ring maps to the driver's RX page ring used for large MTUs.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7335
/* Resize the RX/TX rings.  If the device is running, it is stopped,
 * the chip is reset, and all ring memory is reallocated and the NIC
 * reinitialized with the new sizes.  @reset_irq additionally tears
 * down and rebuilds the IRQ/NAPI setup (needed when the channel
 * configuration changes).  On any bring-up failure the device is
 * closed via dev_close().
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NAPI must be enabled for dev_close() to be able
			 * to quiesce it again during teardown.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7391
7392 static int
7393 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7394 {
7395         struct bnx2 *bp = netdev_priv(dev);
7396         int rc;
7397
7398         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7399                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7400                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7401
7402                 return -EINVAL;
7403         }
7404         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7405                                    false);
7406         return rc;
7407 }
7408
7409 static void
7410 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7411 {
7412         struct bnx2 *bp = netdev_priv(dev);
7413
7414         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7415         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7416         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7417 }
7418
7419 static int
7420 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7421 {
7422         struct bnx2 *bp = netdev_priv(dev);
7423
7424         bp->req_flow_ctrl = 0;
7425         if (epause->rx_pause)
7426                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7427         if (epause->tx_pause)
7428                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7429
7430         if (epause->autoneg) {
7431                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7432         }
7433         else {
7434                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7435         }
7436
7437         if (netif_running(dev)) {
7438                 spin_lock_bh(&bp->phy_lock);
7439                 bnx2_setup_phy(bp, bp->phy_port);
7440                 spin_unlock_bh(&bp->phy_lock);
7441         }
7442
7443         return 0;
7444 }
7445
/* ethtool statistics names.  The order here must match the register
 * offsets in bnx2_stats_offset_arr element for element.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7497
/* Number of entries in the ethtool statistics tables. */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a statistics_block member, expressed in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7501
/* Word offset into the hardware statistics block for each counter in
 * bnx2_stats_str_arr (same order).  64-bit counters reference their
 * high word; the low word is read from offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7551
/* Per-counter widths in bytes for 5706 A0-A2 and 5708 A0 chips:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = not reported.
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7562
/* Per-counter widths for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7570
/* Number of ethtool self-tests; must match bnx2_tests_str_arr. */
#define BNX2_NUM_TESTS 6

/* Names reported for ETH_SS_TEST; the order matches the buf[] result
 * slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7583
7584 static int
7585 bnx2_get_sset_count(struct net_device *dev, int sset)
7586 {
7587         switch (sset) {
7588         case ETH_SS_TEST:
7589                 return BNX2_NUM_TESTS;
7590         case ETH_SS_STATS:
7591                 return BNX2_NUM_STATS;
7592         default:
7593                 return -EOPNOTSUPP;
7594         }
7595 }
7596
/* ethtool self-test handler.  Offline tests reset the chip and thus
 * disrupt traffic; online tests run against the live NIC.  Each slot
 * of buf[] corresponds to an entry in bnx2_tests_str_arr (non-zero
 * means the test failed).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the NIC out of service and put the chip into
		 * diagnostic mode before the destructive tests.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* The loopback result is reported directly in buf[2]. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation, or shut the chip down if
		 * the interface was brought down in the meantime.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (bounded at ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7651
7652 static void
7653 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7654 {
7655         switch (stringset) {
7656         case ETH_SS_STATS:
7657                 memcpy(buf, bnx2_stats_str_arr,
7658                         sizeof(bnx2_stats_str_arr));
7659                 break;
7660         case ETH_SS_TEST:
7661                 memcpy(buf, bnx2_tests_str_arr,
7662                         sizeof(bnx2_tests_str_arr));
7663                 break;
7664         }
7665 }
7666
7667 static void
7668 bnx2_get_ethtool_stats(struct net_device *dev,
7669                 struct ethtool_stats *stats, u64 *buf)
7670 {
7671         struct bnx2 *bp = netdev_priv(dev);
7672         int i;
7673         u32 *hw_stats = (u32 *) bp->stats_blk;
7674         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7675         u8 *stats_len_arr = NULL;
7676
7677         if (hw_stats == NULL) {
7678                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7679                 return;
7680         }
7681
7682         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7683             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7684             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7685             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7686                 stats_len_arr = bnx2_5706_stats_len_arr;
7687         else
7688                 stats_len_arr = bnx2_5708_stats_len_arr;
7689
7690         for (i = 0; i < BNX2_NUM_STATS; i++) {
7691                 unsigned long offset;
7692
7693                 if (stats_len_arr[i] == 0) {
7694                         /* skip this counter */
7695                         buf[i] = 0;
7696                         continue;
7697                 }
7698
7699                 offset = bnx2_stats_offset_arr[i];
7700                 if (stats_len_arr[i] == 4) {
7701                         /* 4-byte counter */
7702                         buf[i] = (u64) *(hw_stats + offset) +
7703                                  *(temp_stats + offset);
7704                         continue;
7705                 }
7706                 /* 8-byte counter */
7707                 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7708                          *(hw_stats + offset + 1) +
7709                          (((u64) *(temp_stats + offset)) << 32) +
7710                          *(temp_stats + offset + 1);
7711         }
7712 }
7713
/* ethtool LED identify (ethtool -p) handler.  Saves and restores the
 * LED mode configuration and drives the port LED via EMAC override
 * bits while the blink is active.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current LED config, switch to MAC LED mode. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override every speed/traffic LED to on. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		/* Keep the override active with all LEDs off. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Drop the overrides and restore the saved LED mode. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7746
/* ndo_set_features handler.  Keeps the TSO-over-VLAN features and
 * the hardware VLAN tag stripping state consistent with the
 * requested feature set.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* When the requested RX VLAN-stripping setting differs from the
	 * current KEEP_VLAN_TAG state and the device is up, apply the
	 * features here: reprogram the RX mode and notify firmware.
	 * Returning 1 signals that dev->features was updated already.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7771
7772 static void bnx2_get_channels(struct net_device *dev,
7773                               struct ethtool_channels *channels)
7774 {
7775         struct bnx2 *bp = netdev_priv(dev);
7776         u32 max_rx_rings = 1;
7777         u32 max_tx_rings = 1;
7778
7779         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7780                 max_rx_rings = RX_MAX_RINGS;
7781                 max_tx_rings = TX_MAX_RINGS;
7782         }
7783
7784         channels->max_rx = max_rx_rings;
7785         channels->max_tx = max_tx_rings;
7786         channels->max_other = 0;
7787         channels->max_combined = 0;
7788         channels->rx_count = bp->num_rx_rings;
7789         channels->tx_count = bp->num_tx_rings;
7790         channels->other_count = 0;
7791         channels->combined_count = 0;
7792 }
7793
7794 static int bnx2_set_channels(struct net_device *dev,
7795                               struct ethtool_channels *channels)
7796 {
7797         struct bnx2 *bp = netdev_priv(dev);
7798         u32 max_rx_rings = 1;
7799         u32 max_tx_rings = 1;
7800         int rc = 0;
7801
7802         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7803                 max_rx_rings = RX_MAX_RINGS;
7804                 max_tx_rings = TX_MAX_RINGS;
7805         }
7806         if (channels->rx_count > max_rx_rings ||
7807             channels->tx_count > max_tx_rings)
7808                 return -EINVAL;
7809
7810         bp->num_req_rx_rings = channels->rx_count;
7811         bp->num_req_tx_rings = channels->tx_count;
7812
7813         if (netif_running(dev))
7814                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7815                                            bp->tx_ring_size, true);
7816
7817         return rc;
7818 }
7819
/* ethtool callback table; each entry is implemented above. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7847
/* MII register access ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Direct MII access is unavailable when the PHY is
		 * managed by remote firmware.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes access to the MII interface. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7898
7899 /* Called with rtnl_lock */
7900 static int
7901 bnx2_change_mac_addr(struct net_device *dev, void *p)
7902 {
7903         struct sockaddr *addr = p;
7904         struct bnx2 *bp = netdev_priv(dev);
7905
7906         if (!is_valid_ether_addr(addr->sa_data))
7907                 return -EADDRNOTAVAIL;
7908
7909         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7910         if (netif_running(dev))
7911                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7912
7913         return 0;
7914 }
7915
7916 /* Called with rtnl_lock */
7917 static int
7918 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7919 {
7920         struct bnx2 *bp = netdev_priv(dev);
7921
7922         dev->mtu = new_mtu;
7923         return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7924                                      false);
7925 }
7926
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	/* Invoke each vector's handler by hand with its IRQ masked,
	 * for netpoll contexts where interrupts cannot be taken.
	 */
	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		struct bnx2_irq *irq = &bp->irq_tbl[vec];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[vec]);
		enable_irq(irq->vector);
	}
}
#endif
7943
7944 static void
7945 bnx2_get_5709_media(struct bnx2 *bp)
7946 {
7947         u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7948         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7949         u32 strap;
7950
7951         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7952                 return;
7953         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7954                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7955                 return;
7956         }
7957
7958         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7959                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7960         else
7961                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7962
7963         if (bp->func == 0) {
7964                 switch (strap) {
7965                 case 0x4:
7966                 case 0x5:
7967                 case 0x6:
7968                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7969                         return;
7970                 }
7971         } else {
7972                 switch (strap) {
7973                 case 0x1:
7974                 case 0x2:
7975                 case 0x4:
7976                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7977                         return;
7978                 }
7979         }
7980 }
7981
/* Detect the PCI/PCI-X bus type, speed, and width from the PCICFG
 * status registers and record the results in bp->flags and
 * bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		/* PCI-X: derive the bus speed from the detected clock. */
		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: 66 vs 33 MHz from the M66EN status bit. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8033
/* Read the VPD area from NVRAM and, if a vendor version keyword is
 * present, copy it into the start of bp->fw_version followed by a
 * separating space.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	/* Read the raw VPD into the upper half of the buffer... */
	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* ...then reverse the byte order of each 32-bit word into the
	 * lower half, which the VPD parsing helpers operate on.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* NOTE(review): "1028" appears to be Dell's PCI vendor ID —
	 * only boards with this manufacturer ID get a VPD fw version.
	 */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Store the version at the start of fw_version, then a space
	 * so later code can append more version components.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8101
8102 static int
8103 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8104 {
8105         struct bnx2 *bp;
8106         int rc, i, j;
8107         u32 reg;
8108         u64 dma_mask, persist_dma_mask;
8109         int err;
8110
8111         SET_NETDEV_DEV(dev, &pdev->dev);
8112         bp = netdev_priv(dev);
8113
8114         bp->flags = 0;
8115         bp->phy_flags = 0;
8116
8117         bp->temp_stats_blk =
8118                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8119
8120         if (bp->temp_stats_blk == NULL) {
8121                 rc = -ENOMEM;
8122                 goto err_out;
8123         }
8124
8125         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8126         rc = pci_enable_device(pdev);
8127         if (rc) {
8128                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8129                 goto err_out;
8130         }
8131
8132         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8133                 dev_err(&pdev->dev,
8134                         "Cannot find PCI device base address, aborting\n");
8135                 rc = -ENODEV;
8136                 goto err_out_disable;
8137         }
8138
8139         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8140         if (rc) {
8141                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8142                 goto err_out_disable;
8143         }
8144
8145         pci_set_master(pdev);
8146
8147         bp->pm_cap = pdev->pm_cap;
8148         if (bp->pm_cap == 0) {
8149                 dev_err(&pdev->dev,
8150                         "Cannot find power management capability, aborting\n");
8151                 rc = -EIO;
8152                 goto err_out_release;
8153         }
8154
8155         bp->dev = dev;
8156         bp->pdev = pdev;
8157
8158         spin_lock_init(&bp->phy_lock);
8159         spin_lock_init(&bp->indirect_lock);
8160 #ifdef BCM_CNIC
8161         mutex_init(&bp->cnic_lock);
8162 #endif
8163         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8164
8165         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8166                                                          TX_MAX_TSS_RINGS + 1));
8167         if (!bp->regview) {
8168                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8169                 rc = -ENOMEM;
8170                 goto err_out_release;
8171         }
8172
8173         /* Configure byte swap and enable write to the reg_window registers.
8174          * Rely on CPU to do target byte swapping on big endian systems
8175          * The chip's target access swapping will not swap all accesses
8176          */
8177         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8178                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8179                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8180
8181         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8182
8183         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8184                 if (!pci_is_pcie(pdev)) {
8185                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8186                         rc = -EIO;
8187                         goto err_out_unmap;
8188                 }
8189                 bp->flags |= BNX2_FLAG_PCIE;
8190                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8191                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8192
8193                 /* AER (Advanced Error Reporting) hooks */
8194                 err = pci_enable_pcie_error_reporting(pdev);
8195                 if (!err)
8196                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8197
8198         } else {
8199                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8200                 if (bp->pcix_cap == 0) {
8201                         dev_err(&pdev->dev,
8202                                 "Cannot find PCIX capability, aborting\n");
8203                         rc = -EIO;
8204                         goto err_out_unmap;
8205                 }
8206                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8207         }
8208
8209         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8210             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8211                 if (pdev->msix_cap)
8212                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8213         }
8214
8215         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8216             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8217                 if (pdev->msi_cap)
8218                         bp->flags |= BNX2_FLAG_MSI_CAP;
8219         }
8220
8221         /* 5708 cannot support DMA addresses > 40-bit.  */
8222         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8223                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8224         else
8225                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8226
8227         /* Configure DMA attributes. */
8228         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8229                 dev->features |= NETIF_F_HIGHDMA;
8230                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8231                 if (rc) {
8232                         dev_err(&pdev->dev,
8233                                 "pci_set_consistent_dma_mask failed, aborting\n");
8234                         goto err_out_unmap;
8235                 }
8236         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8237                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8238                 goto err_out_unmap;
8239         }
8240
8241         if (!(bp->flags & BNX2_FLAG_PCIE))
8242                 bnx2_get_pci_speed(bp);
8243
8244         /* 5706A0 may falsely detect SERR and PERR. */
8245         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8246                 reg = BNX2_RD(bp, PCI_COMMAND);
8247                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8248                 BNX2_WR(bp, PCI_COMMAND, reg);
8249         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8250                 !(bp->flags & BNX2_FLAG_PCIX)) {
8251                 dev_err(&pdev->dev,
8252                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8253                 rc = -EPERM;
8254                 goto err_out_unmap;
8255         }
8256
8257         bnx2_init_nvram(bp);
8258
8259         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8260
8261         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8262                 bp->func = 1;
8263
8264         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8265             BNX2_SHM_HDR_SIGNATURE_SIG) {
8266                 u32 off = bp->func << 2;
8267
8268                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8269         } else
8270                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8271
8272         /* Get the permanent MAC address.  First we need to make sure the
8273          * firmware is actually running.
8274          */
8275         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8276
8277         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8278             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8279                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8280                 rc = -ENODEV;
8281                 goto err_out_unmap;
8282         }
8283
8284         bnx2_read_vpd_fw_ver(bp);
8285
8286         j = strlen(bp->fw_version);
8287         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8288         for (i = 0; i < 3 && j < 24; i++) {
8289                 u8 num, k, skip0;
8290
8291                 if (i == 0) {
8292                         bp->fw_version[j++] = 'b';
8293                         bp->fw_version[j++] = 'c';
8294                         bp->fw_version[j++] = ' ';
8295                 }
8296                 num = (u8) (reg >> (24 - (i * 8)));
8297                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8298                         if (num >= k || !skip0 || k == 1) {
8299                                 bp->fw_version[j++] = (num / k) + '0';
8300                                 skip0 = 0;
8301                         }
8302                 }
8303                 if (i != 2)
8304                         bp->fw_version[j++] = '.';
8305         }
8306         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8307         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8308                 bp->wol = 1;
8309
8310         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8311                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8312
8313                 for (i = 0; i < 30; i++) {
8314                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8315                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8316                                 break;
8317                         msleep(10);
8318                 }
8319         }
8320         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8321         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8322         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8323             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8324                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8325
8326                 if (j < 32)
8327                         bp->fw_version[j++] = ' ';
8328                 for (i = 0; i < 3 && j < 28; i++) {
8329                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8330                         reg = be32_to_cpu(reg);
8331                         memcpy(&bp->fw_version[j], &reg, 4);
8332                         j += 4;
8333                 }
8334         }
8335
8336         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8337         bp->mac_addr[0] = (u8) (reg >> 8);
8338         bp->mac_addr[1] = (u8) reg;
8339
8340         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8341         bp->mac_addr[2] = (u8) (reg >> 24);
8342         bp->mac_addr[3] = (u8) (reg >> 16);
8343         bp->mac_addr[4] = (u8) (reg >> 8);
8344         bp->mac_addr[5] = (u8) reg;
8345
8346         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8347         bnx2_set_rx_ring_size(bp, 255);
8348
8349         bp->tx_quick_cons_trip_int = 2;
8350         bp->tx_quick_cons_trip = 20;
8351         bp->tx_ticks_int = 18;
8352         bp->tx_ticks = 80;
8353
8354         bp->rx_quick_cons_trip_int = 2;
8355         bp->rx_quick_cons_trip = 12;
8356         bp->rx_ticks_int = 18;
8357         bp->rx_ticks = 18;
8358
8359         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8360
8361         bp->current_interval = BNX2_TIMER_INTERVAL;
8362
8363         bp->phy_addr = 1;
8364
8365         /* allocate stats_blk */
8366         rc = bnx2_alloc_stats_blk(dev);
8367         if (rc)
8368                 goto err_out_unmap;
8369
8370         /* Disable WOL support if we are running on a SERDES chip. */
8371         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8372                 bnx2_get_5709_media(bp);
8373         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8374                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8375
8376         bp->phy_port = PORT_TP;
8377         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8378                 bp->phy_port = PORT_FIBRE;
8379                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8380                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8381                         bp->flags |= BNX2_FLAG_NO_WOL;
8382                         bp->wol = 0;
8383                 }
8384                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8385                         /* Don't do parallel detect on this board because of
8386                          * some board problems.  The link will not go down
8387                          * if we do parallel detect.
8388                          */
8389                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8390                             pdev->subsystem_device == 0x310c)
8391                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8392                 } else {
8393                         bp->phy_addr = 2;
8394                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8395                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8396                 }
8397         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8398                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8399                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8400         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8401                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8402                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8403                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8404
8405         bnx2_init_fw_cap(bp);
8406
8407         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8408             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8409             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8410             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8411                 bp->flags |= BNX2_FLAG_NO_WOL;
8412                 bp->wol = 0;
8413         }
8414
8415         if (bp->flags & BNX2_FLAG_NO_WOL)
8416                 device_set_wakeup_capable(&bp->pdev->dev, false);
8417         else
8418                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8419
8420         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8421                 bp->tx_quick_cons_trip_int =
8422                         bp->tx_quick_cons_trip;
8423                 bp->tx_ticks_int = bp->tx_ticks;
8424                 bp->rx_quick_cons_trip_int =
8425                         bp->rx_quick_cons_trip;
8426                 bp->rx_ticks_int = bp->rx_ticks;
8427                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8428                 bp->com_ticks_int = bp->com_ticks;
8429                 bp->cmd_ticks_int = bp->cmd_ticks;
8430         }
8431
8432         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8433          *
8434          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8435          * with byte enables disabled on the unused 32-bit word.  This is legal
8436          * but causes problems on the AMD 8132 which will eventually stop
8437          * responding after a while.
8438          *
8439          * AMD believes this incompatibility is unique to the 5706, and
8440          * prefers to locally disable MSI rather than globally disabling it.
8441          */
8442         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8443                 struct pci_dev *amd_8132 = NULL;
8444
8445                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8446                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8447                                                   amd_8132))) {
8448
8449                         if (amd_8132->revision >= 0x10 &&
8450                             amd_8132->revision <= 0x13) {
8451                                 disable_msi = 1;
8452                                 pci_dev_put(amd_8132);
8453                                 break;
8454                         }
8455                 }
8456         }
8457
8458         bnx2_set_default_link(bp);
8459         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8460
8461         init_timer(&bp->timer);
8462         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8463         bp->timer.data = (unsigned long) bp;
8464         bp->timer.function = bnx2_timer;
8465
8466 #ifdef BCM_CNIC
8467         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8468                 bp->cnic_eth_dev.max_iscsi_conn =
8469                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8470                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8471         bp->cnic_probe = bnx2_cnic_probe;
8472 #endif
8473         pci_save_state(pdev);
8474
8475         return 0;
8476
8477 err_out_unmap:
8478         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8479                 pci_disable_pcie_error_reporting(pdev);
8480                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8481         }
8482
8483         pci_iounmap(pdev, bp->regview);
8484         bp->regview = NULL;
8485
8486 err_out_release:
8487         pci_release_regions(pdev);
8488
8489 err_out_disable:
8490         pci_disable_device(pdev);
8491
8492 err_out:
8493         kfree(bp->temp_stats_blk);
8494
8495         return rc;
8496 }
8497
8498 static char *
8499 bnx2_bus_string(struct bnx2 *bp, char *str)
8500 {
8501         char *s = str;
8502
8503         if (bp->flags & BNX2_FLAG_PCIE) {
8504                 s += sprintf(s, "PCI Express");
8505         } else {
8506                 s += sprintf(s, "PCI");
8507                 if (bp->flags & BNX2_FLAG_PCIX)
8508                         s += sprintf(s, "-X");
8509                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8510                         s += sprintf(s, " 32-bit");
8511                 else
8512                         s += sprintf(s, " 64-bit");
8513                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8514         }
8515         return str;
8516 }
8517
8518 static void
8519 bnx2_del_napi(struct bnx2 *bp)
8520 {
8521         int i;
8522
8523         for (i = 0; i < bp->irq_nvecs; i++)
8524                 netif_napi_del(&bp->bnx2_napi[i].napi);
8525 }
8526
8527 static void
8528 bnx2_init_napi(struct bnx2 *bp)
8529 {
8530         int i;
8531
8532         for (i = 0; i < bp->irq_nvecs; i++) {
8533                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8534                 int (*poll)(struct napi_struct *, int);
8535
8536                 if (i == 0)
8537                         poll = bnx2_poll;
8538                 else
8539                         poll = bnx2_poll_msix;
8540
8541                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8542                 bnapi->bp = bp;
8543         }
8544 }
8545
/* net_device callbacks exported to the networking core. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8562
8563 static int
8564 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8565 {
8566         static int version_printed = 0;
8567         struct net_device *dev;
8568         struct bnx2 *bp;
8569         int rc;
8570         char str[40];
8571
8572         if (version_printed++ == 0)
8573                 pr_info("%s", version);
8574
8575         /* dev zeroed in init_etherdev */
8576         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8577         if (!dev)
8578                 return -ENOMEM;
8579
8580         rc = bnx2_init_board(pdev, dev);
8581         if (rc < 0)
8582                 goto err_free;
8583
8584         dev->netdev_ops = &bnx2_netdev_ops;
8585         dev->watchdog_timeo = TX_TIMEOUT;
8586         dev->ethtool_ops = &bnx2_ethtool_ops;
8587
8588         bp = netdev_priv(dev);
8589
8590         pci_set_drvdata(pdev, dev);
8591
8592         /*
8593          * In-flight DMA from 1st kernel could continue going in kdump kernel.
8594          * New io-page table has been created before bnx2 does reset at open stage.
8595          * We have to wait for the in-flight DMA to complete to avoid it look up
8596          * into the newly created io-page table.
8597          */
8598         if (is_kdump_kernel())
8599                 bnx2_wait_dma_complete(bp);
8600
8601         memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8602
8603         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8604                 NETIF_F_TSO | NETIF_F_TSO_ECN |
8605                 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8606
8607         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8608                 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8609
8610         dev->vlan_features = dev->hw_features;
8611         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8612         dev->features |= dev->hw_features;
8613         dev->priv_flags |= IFF_UNICAST_FLT;
8614         dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8615         dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8616
8617         if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8618                 dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8619
8620         if ((rc = register_netdev(dev))) {
8621                 dev_err(&pdev->dev, "Cannot register net device\n");
8622                 goto error;
8623         }
8624
8625         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8626                     "node addr %pM\n", board_info[ent->driver_data].name,
8627                     ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8628                     ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8629                     bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8630                     pdev->irq, dev->dev_addr);
8631
8632         return 0;
8633
8634 error:
8635         pci_iounmap(pdev, bp->regview);
8636         pci_release_regions(pdev);
8637         pci_disable_device(pdev);
8638 err_free:
8639         bnx2_free_stats_blk(dev);
8640         free_netdev(dev);
8641         return rc;
8642 }
8643
/* PCI remove: tear everything down, roughly in reverse order of
 * bnx2_init_one().  The ordering below is deliberate — do not reorder.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Stop new activity from the stack before freeing anything. */
	unregister_netdev(dev);

	/* Quiesce deferred work that could still touch the device. */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8672
8673 #ifdef CONFIG_PM_SLEEP
8674 static int
8675 bnx2_suspend(struct device *device)
8676 {
8677         struct pci_dev *pdev = to_pci_dev(device);
8678         struct net_device *dev = pci_get_drvdata(pdev);
8679         struct bnx2 *bp = netdev_priv(dev);
8680
8681         if (netif_running(dev)) {
8682                 cancel_work_sync(&bp->reset_task);
8683                 bnx2_netif_stop(bp, true);
8684                 netif_device_detach(dev);
8685                 del_timer_sync(&bp->timer);
8686                 bnx2_shutdown_chip(bp);
8687                 __bnx2_free_irq(bp);
8688                 bnx2_free_skbs(bp);
8689         }
8690         bnx2_setup_wol(bp);
8691         return 0;
8692 }
8693
8694 static int
8695 bnx2_resume(struct device *device)
8696 {
8697         struct pci_dev *pdev = to_pci_dev(device);
8698         struct net_device *dev = pci_get_drvdata(pdev);
8699         struct bnx2 *bp = netdev_priv(dev);
8700
8701         if (!netif_running(dev))
8702                 return 0;
8703
8704         bnx2_set_power_state(bp, PCI_D0);
8705         netif_device_attach(dev);
8706         bnx2_request_irq(bp);
8707         bnx2_init_nic(bp, 1);
8708         bnx2_netif_start(bp, true);
8709         return 0;
8710 }
8711
/* Expose suspend/resume through dev_pm_ops when CONFIG_PM_SLEEP is set;
 * otherwise the driver registers no PM callbacks.
 */
static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
#define BNX2_PM_OPS (&bnx2_pm_ops)

#else

#define BNX2_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
8720 /**
8721  * bnx2_io_error_detected - called when PCI error is detected
8722  * @pdev: Pointer to PCI device
8723  * @state: The current pci connection state
8724  *
8725  * This function is called after a PCI bus error affecting
8726  * this device has been detected.
8727  */
8728 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8729                                                pci_channel_state_t state)
8730 {
8731         struct net_device *dev = pci_get_drvdata(pdev);
8732         struct bnx2 *bp = netdev_priv(dev);
8733
8734         rtnl_lock();
8735         netif_device_detach(dev);
8736
8737         if (state == pci_channel_io_perm_failure) {
8738                 rtnl_unlock();
8739                 return PCI_ERS_RESULT_DISCONNECT;
8740         }
8741
8742         if (netif_running(dev)) {
8743                 bnx2_netif_stop(bp, true);
8744                 del_timer_sync(&bp->timer);
8745                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8746         }
8747
8748         pci_disable_device(pdev);
8749         rtnl_unlock();
8750
8751         /* Request a slot slot reset. */
8752         return PCI_ERS_RESULT_NEED_RESET;
8753 }
8754
8755 /**
8756  * bnx2_io_slot_reset - called after the pci bus has been reset.
8757  * @pdev: Pointer to PCI device
8758  *
8759  * Restart the card from scratch, as if from a cold-boot.
8760  */
8761 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8762 {
8763         struct net_device *dev = pci_get_drvdata(pdev);
8764         struct bnx2 *bp = netdev_priv(dev);
8765         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8766         int err = 0;
8767
8768         rtnl_lock();
8769         if (pci_enable_device(pdev)) {
8770                 dev_err(&pdev->dev,
8771                         "Cannot re-enable PCI device after reset\n");
8772         } else {
8773                 pci_set_master(pdev);
8774                 pci_restore_state(pdev);
8775                 pci_save_state(pdev);
8776
8777                 if (netif_running(dev))
8778                         err = bnx2_init_nic(bp, 1);
8779
8780                 if (!err)
8781                         result = PCI_ERS_RESULT_RECOVERED;
8782         }
8783
8784         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8785                 bnx2_napi_enable(bp);
8786                 dev_close(dev);
8787         }
8788         rtnl_unlock();
8789
8790         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8791                 return result;
8792
8793         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8794         if (err) {
8795                 dev_err(&pdev->dev,
8796                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8797                          err); /* non-fatal, continue */
8798         }
8799
8800         return result;
8801 }
8802
8803 /**
8804  * bnx2_io_resume - called when traffic can start flowing again.
8805  * @pdev: Pointer to PCI device
8806  *
8807  * This callback is called when the error recovery driver tells us that
8808  * its OK to resume normal operation.
8809  */
8810 static void bnx2_io_resume(struct pci_dev *pdev)
8811 {
8812         struct net_device *dev = pci_get_drvdata(pdev);
8813         struct bnx2 *bp = netdev_priv(dev);
8814
8815         rtnl_lock();
8816         if (netif_running(dev))
8817                 bnx2_netif_start(bp, true);
8818
8819         netif_device_attach(dev);
8820         rtnl_unlock();
8821 }
8822
8823 static void bnx2_shutdown(struct pci_dev *pdev)
8824 {
8825         struct net_device *dev = pci_get_drvdata(pdev);
8826         struct bnx2 *bp;
8827
8828         if (!dev)
8829                 return;
8830
8831         bp = netdev_priv(dev);
8832         if (!bp)
8833                 return;
8834
8835         rtnl_lock();
8836         if (netif_running(dev))
8837                 dev_close(bp->dev);
8838
8839         if (system_state == SYSTEM_POWER_OFF)
8840                 bnx2_set_power_state(bp, PCI_D3hot);
8841
8842         rtnl_unlock();
8843 }
8844
/* PCI error recovery (AER) callbacks. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8850
/* PCI driver description registered with the PCI core. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

/* Generates the module init/exit boilerplate for the driver above. */
module_pci_driver(bnx2_pci_driver);