GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnx2.c
1 /* bnx2.c: QLogic bnx2 network driver.
2  *
3  * Copyright (c) 2004-2014 Broadcom Corporation
4  * Copyright (c) 2014-2015 QLogic Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  *
10  * Written by: Michael Chan  (mchan@broadcom.com)
11  */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/timer.h>
21 #include <linux/errno.h>
22 #include <linux/ioport.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/skbuff.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/bitops.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <linux/delay.h>
35 #include <asm/byteorder.h>
36 #include <asm/page.h>
37 #include <linux/time.h>
38 #include <linux/ethtool.h>
39 #include <linux/mii.h>
40 #include <linux/if.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/aer.h>
52 #include <linux/crash_dump.h>
53
54 #if IS_ENABLED(CONFIG_CNIC)
55 #define BCM_CNIC 1
56 #include "cnic_if.h"
57 #endif
58 #include "bnx2.h"
59 #include "bnx2_fw.h"
60
61 #define DRV_MODULE_NAME         "bnx2"
62 #define FW_MIPS_FILE_06         "/*(DEBLOBBED)*/"
63 #define FW_RV2P_FILE_06         "/*(DEBLOBBED)*/"
64 #define FW_MIPS_FILE_09         "/*(DEBLOBBED)*/"
65 #define FW_RV2P_FILE_09_Ax      "/*(DEBLOBBED)*/"
66 #define FW_RV2P_FILE_09         "/*(DEBLOBBED)*/"
67
68 #define RUN_AT(x) (jiffies + (x))
69
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT  (5*HZ)
72
73 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
74 MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
75 MODULE_LICENSE("GPL");
76 /*(DEBLOBBED)*/
77
78 static int disable_msi = 0;
79
80 module_param(disable_msi, int, 0444);
81 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
82
/* Board types supported by this driver.  The values are used as the
 * driver_data index into board_info[] below via bnx2_pci_tbl.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM variants of the BCM5706/5706S */
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
96
97 /* indexed by board_t, above */
static struct {
	char *name;	/* human-readable adapter name, printed at probe time */
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
113
/* PCI IDs handled by this driver.  HP OEM boards are matched first by
 * subsystem vendor/device ID; the PCI_ANY_ID entries catch the generic
 * Broadcom parts.  The last field is the board_t index.
 */
static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/5716S, no PCI_DEVICE_ID_* constant */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
139
/* Known NVRAM devices.  Each entry carries the strap value used to
 * identify the part plus the register configuration and geometry for
 * accessing it.  Entries marked "Expansion" are placeholders for strap
 * codes with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
228
/* The 5709 has a single known NVRAM configuration, so it gets its own
 * flash_spec instead of an entry in flash_table[].
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
237
238 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
239
240 static void bnx2_init_napi(struct bnx2 *bp);
241 static void bnx2_del_napi(struct bnx2 *bp);
242
243 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
244 {
245         u32 diff;
246
247         /* The ring uses 256 indices for 255 entries, one of them
248          * needs to be skipped.
249          */
250         diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
251         if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
252                 diff &= 0xffff;
253                 if (diff == BNX2_TX_DESC_CNT)
254                         diff = BNX2_MAX_TX_DESC_CNT;
255         }
256         return bp->tx_ring_size - diff;
257 }
258
/* Read a device register indirectly through the PCI config register
 * window.  indirect_lock serializes use of the shared window
 * address/data register pair.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}
271
/* Write a device register indirectly through the PCI config register
 * window; counterpart of bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
282
/* Write one word of the driver/firmware shared memory region. */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
288
/* Read one word of the driver/firmware shared memory region. */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
294
/* Write one 32-bit word of context memory at @cid_addr + @offset.
 *
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * register pair and is polled (up to 5 iterations of 5us) until the
 * hardware clears WRITE_REQ; a timeout is silently ignored.  Older
 * chips use the simpler CTX_DATA_ADR/CTX_DATA pair.  indirect_lock
 * serializes access to these shared registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
320
321 #ifdef BCM_CNIC
322 static int
323 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
324 {
325         struct bnx2 *bp = netdev_priv(dev);
326         struct drv_ctl_io *io = &info->data.io;
327
328         switch (info->cmd) {
329         case DRV_CTL_IO_WR_CMD:
330                 bnx2_reg_wr_ind(bp, io->offset, io->data);
331                 break;
332         case DRV_CTL_IO_RD_CMD:
333                 io->data = bnx2_reg_rd_ind(bp, io->offset);
334                 break;
335         case DRV_CTL_CTX_WR_CMD:
336                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
337                 break;
338         default:
339                 return -EINVAL;
340         }
341         return 0;
342 }
343
/* Fill in the CNIC driver's IRQ/status-block information.
 *
 * With MSI-X, CNIC gets its own vector (the one after the last net
 * vector, index bp->irq_nvecs) and the base NAPI no longer polls for
 * CNIC.  Without MSI-X, CNIC shares vector 0 and the base NAPI is
 * tagged to service it.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* each MSI-X status block is BNX2_SBLK_MSIX_ALIGN_SIZE apart */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
370
/* Register the CNIC driver's ops with this device.
 *
 * Fails with -EBUSY if CNIC is already registered and -ENODEV if the
 * firmware reports no iSCSI connections.  @data is opaque CNIC state
 * passed back through cnic_ctl calls; @ops is published via RCU.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
396
/* Unregister the CNIC driver: clear its state under cnic_lock and wait
 * for in-flight RCU readers of cnic_ops to finish before returning.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
411
/* CNIC probe entry point: hand out this device's cnic_eth_dev,
 * populated with our resources and callbacks.  Returns NULL when the
 * firmware supports no iSCSI connections (max_iscsi_conn == 0).
 */
static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
430
/* Tell a registered CNIC driver (if any) to stop.  cnic_lock protects
 * cnic_ops against concurrent (un)registration.
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
446
/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode
 * the shared status-block tag must be refreshed first so the base NAPI
 * resumes polling for CNIC events.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
467
468 #else
469
/* CONFIG_CNIC disabled: no-op stubs so callers need no #ifdefs. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
479
480 #endif
481
/* Read PHY register @reg over MDIO into *@val.
 *
 * If hardware auto-polling of the PHY is enabled it must be turned off
 * for the duration of the manual access and restored afterwards (the
 * 40us delays follow the mode change).  The command is polled up to
 * 50 x 10us for START_BUSY to clear.  Returns 0 on success or -EBUSY
 * on timeout, in which case *@val is set to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* PHY address in bits 21-25, register in bits 16-20 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* re-read to fetch the returned data bits */
			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
538
/* Write @val to PHY register @reg over MDIO.
 *
 * Same auto-poll disable/restore and 50 x 10us busy-poll discipline as
 * bnx2_read_phy().  Returns 0 on success or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* PHY address in bits 21-25, register in bits 16-20, data low */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
587
588 static void
589 bnx2_disable_int(struct bnx2 *bp)
590 {
591         int i;
592         struct bnx2_napi *bnapi;
593
594         for (i = 0; i < bp->irq_nvecs; i++) {
595                 bnapi = &bp->bnx2_napi[i];
596                 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
597                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
598         }
599         BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
600 }
601
/* Unmask interrupts on every vector.
 *
 * For each vector the status index is acknowledged twice: first with
 * MASK_INT still set, then without it to actually unmask.  Finally a
 * COAL_NOW command forces the host coalescing block to generate an
 * interrupt if events are already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
622
/* Disable interrupts and wait for all in-flight handlers to finish.
 *
 * intr_sem is raised first so the ISR treats further interrupts as
 * spurious; it is lowered again by bnx2_netif_start().  If the device
 * is not running there is nothing to mask or synchronize.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
636
637 static void
638 bnx2_napi_disable(struct bnx2 *bp)
639 {
640         int i;
641
642         for (i = 0; i < bp->irq_nvecs; i++)
643                 napi_disable(&bp->bnx2_napi[i].napi);
644 }
645
646 static void
647 bnx2_napi_enable(struct bnx2 *bp)
648 {
649         int i;
650
651         for (i = 0; i < bp->irq_nvecs; i++)
652                 napi_enable(&bp->bnx2_napi[i].napi);
653 }
654
/* Quiesce the device: optionally stop CNIC, stop NAPI and the TX
 * queues, then disable and synchronize interrupts.  Carrier is forced
 * off so the stack does not declare a TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
667
/* Undo bnx2_netif_stop().
 *
 * intr_sem counts nested stops; only the call that brings it back to
 * zero actually restarts the device.  Carrier is re-asserted under
 * phy_lock based on the last known link state.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
685
686 static void
687 bnx2_free_tx_mem(struct bnx2 *bp)
688 {
689         int i;
690
691         for (i = 0; i < bp->num_tx_rings; i++) {
692                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694
695                 if (txr->tx_desc_ring) {
696                         dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
697                                           txr->tx_desc_ring,
698                                           txr->tx_desc_mapping);
699                         txr->tx_desc_ring = NULL;
700                 }
701                 kfree(txr->tx_buf_ring);
702                 txr->tx_buf_ring = NULL;
703         }
704 }
705
/* Release the descriptor pages, page rings and software rings of every
 * RX ring.  Safe to call on partially-allocated state: every pointer is
 * checked (or the free is NULL-safe) and cleared afterwards.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
737
738 static int
739 bnx2_alloc_tx_mem(struct bnx2 *bp)
740 {
741         int i;
742
743         for (i = 0; i < bp->num_tx_rings; i++) {
744                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748                 if (!txr->tx_buf_ring)
749                         return -ENOMEM;
750
751                 txr->tx_desc_ring =
752                         dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
753                                            &txr->tx_desc_mapping, GFP_KERNEL);
754                 if (!txr->tx_desc_ring)
755                         return -ENOMEM;
756         }
757         return 0;
758 }
759
/* Allocate the software rings, DMA descriptor pages and (when jumbo
 * page rings are in use, rx_pg_ring_size != 0) page rings for every RX
 * ring.  Returns 0 on success or -ENOMEM on the first failure; partial
 * allocations are left for the caller to free (via bnx2_free_rx_mem).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
		if (!rxr->rx_buf_ring)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_desc_ring[j])
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring =
				vzalloc(array_size(SW_RXPG_RING_SIZE,
						   bp->rx_max_pg_ring));
			if (!rxr->rx_pg_ring)
				return -ENOMEM;

		}

		/* rx_max_pg_ring is 0 when page rings are unused */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_pg_desc_ring[j])
				return -ENOMEM;

		}
	}
	return 0;
}
808
809 static void
810 bnx2_free_stats_blk(struct net_device *dev)
811 {
812         struct bnx2 *bp = netdev_priv(dev);
813
814         if (bp->status_blk) {
815                 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
816                                   bp->status_blk,
817                                   bp->status_blk_mapping);
818                 bp->status_blk = NULL;
819                 bp->stats_blk = NULL;
820         }
821 }
822
/* Allocate the status block(s) and statistics block in one coherent
 * DMA allocation.
 *
 * With MSI-X capability, room is reserved for BNX2_MAX_MSIX_HW_VEC
 * per-vector status blocks, each BNX2_SBLK_MSIX_ALIGN_SIZE apart.
 * The statistics block follows the (cache-aligned) status area; its
 * pointer and DMA address are derived from the same allocation.
 * Returns 0 on success or -ENOMEM.
 */
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (!status_blk)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}
848
849 static void
850 bnx2_free_mem(struct bnx2 *bp)
851 {
852         int i;
853         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
854
855         bnx2_free_tx_mem(bp);
856         bnx2_free_rx_mem(bp);
857
858         for (i = 0; i < bp->ctx_pages; i++) {
859                 if (bp->ctx_blk[i]) {
860                         dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
861                                           bp->ctx_blk[i],
862                                           bp->ctx_blk_mapping[i]);
863                         bp->ctx_blk[i] = NULL;
864                 }
865         }
866
867         if (bnapi->status_blk.msi)
868                 bnapi->status_blk.msi = NULL;
869 }
870
/* Wire up per-vector status block pointers, allocate the 5709 host
 * context pages, and allocate the RX/TX ring memory.  bp->status_blk
 * must already be set up (see bnx2_alloc_stats_blk()).
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated
 * here is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	/* Vector 0 uses the base (MSI/INTx) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Each additional vector owns its own aligned
			 * sub-block inside the shared allocation.
			 */
			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number is carried in bits 31:24 of int_num. */
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 keeps 0x2000 bytes of context memory in host DMA
		 * pages; allocate at least one page.
		 */
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
927
/* Report the current link state to the bootcode via the shared-memory
 * BNX2_LINK_STATUS word.  Skipped when the PHY is managed remotely
 * (BNX2_PHY_FLAG_REMOTE_PHY_CAP), since firmware owns the link then.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed/duplex into the firmware status word. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice, presumably because some of its
			 * bits are latched (standard MII behavior) — the
			 * second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
986
987 static char *
988 bnx2_xceiver_str(struct bnx2 *bp)
989 {
990         return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
991                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
992                  "Copper");
993 }
994
/* Log the link state, sync the carrier state with the stack, and
 * forward the state to the bootcode.  The netdev_info() format string
 * deliberately omits a trailing newline so the flow-control details
 * can be appended with pr_cont() before the final "\n".
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1025
/* Derive bp->flow_ctrl (FLOW_CTRL_TX / FLOW_CTRL_RX) for the current
 * link.  If pause is not being autonegotiated, the requested setting
 * is applied directly (full duplex only); otherwise the result follows
 * the local/remote advertisement resolution of IEEE 802.3 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Not autonegotiating pause: honor the requested setting. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause frames are only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes block reports the already-resolved pause state
	 * in its STAT1 register, so no advertisement comparison is needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Map the 1000X pause bits onto the copper-style pause
		 * bits so the resolution logic below can be shared.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1101
/* Record link-up speed/duplex for the 5709 SerDes PHY.  The GP status
 * register lives in a separate PHY register bank, so the block address
 * is switched to GP_STATUS for the read and restored afterwards.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested values, not the AN result. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1140
1141 static int
1142 bnx2_5708s_linkup(struct bnx2 *bp)
1143 {
1144         u32 val;
1145
1146         bp->link_up = 1;
1147         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1148         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1149                 case BCM5708S_1000X_STAT1_SPEED_10:
1150                         bp->line_speed = SPEED_10;
1151                         break;
1152                 case BCM5708S_1000X_STAT1_SPEED_100:
1153                         bp->line_speed = SPEED_100;
1154                         break;
1155                 case BCM5708S_1000X_STAT1_SPEED_1G:
1156                         bp->line_speed = SPEED_1000;
1157                         break;
1158                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1159                         bp->line_speed = SPEED_2500;
1160                         break;
1161         }
1162         if (val & BCM5708S_1000X_STAT1_FD)
1163                 bp->duplex = DUPLEX_FULL;
1164         else
1165                 bp->duplex = DUPLEX_HALF;
1166
1167         return 0;
1168 }
1169
1170 static int
1171 bnx2_5706s_linkup(struct bnx2 *bp)
1172 {
1173         u32 bmcr, local_adv, remote_adv, common;
1174
1175         bp->link_up = 1;
1176         bp->line_speed = SPEED_1000;
1177
1178         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1179         if (bmcr & BMCR_FULLDPLX) {
1180                 bp->duplex = DUPLEX_FULL;
1181         }
1182         else {
1183                 bp->duplex = DUPLEX_HALF;
1184         }
1185
1186         if (!(bmcr & BMCR_ANENABLE)) {
1187                 return 0;
1188         }
1189
1190         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1191         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1192
1193         common = local_adv & remote_adv;
1194         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1195
1196                 if (common & ADVERTISE_1000XFULL) {
1197                         bp->duplex = DUPLEX_FULL;
1198                 }
1199                 else {
1200                         bp->duplex = DUPLEX_HALF;
1201                 }
1202         }
1203
1204         return 0;
1205 }
1206
/* Record link-up speed/duplex for a copper PHY, plus the MDI-X state.
 * With autoneg enabled, 1000BASE-T is checked first, then 100/10 from
 * the common advertisement bits; otherwise the forced BMCR speed and
 * duplex are used.  Always returns 0, but may clear bp->link_up if no
 * common advertisement is found.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Link-partner 1000BASE-T ability bits in MII_STAT1000 sit
		 * two bits above the local bits in MII_CTRL1000 (standard
		 * MII register layout), hence the shift before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to the 10/100 bits. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: take speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		/* Reflect the crossover (MDI-X) status in the PHY flags. */
		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}
1282
/* Program the L2 context type word for one RX context (cid), enabling
 * hardware flow control on it when TX pause is resolved.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* 0x02 in bits 15:8 — vendor-specified context value; exact
	 * meaning not documented here (TODO: confirm against chip docs).
	 */
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1297
1298 static void
1299 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1300 {
1301         int i;
1302         u32 cid;
1303
1304         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1305                 if (i == 1)
1306                         cid = RX_RSS_CID;
1307                 bnx2_init_rx_context(bp, cid);
1308         }
1309 }
1310
/* Program the EMAC to match the resolved link parameters: port mode
 * (MII/GMII/2.5G), duplex, and RX/TX pause enables; then ack the link
 * change interrupt and reinitialize the RX contexts so their
 * flow-control setting tracks bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX lengths (slot time / IPG): 0x2620 is the default; 0x26ff is
	 * presumably required for 1000 Mbps half duplex — vendor magic,
	 * not documented here.
	 */
	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no 10M port mode; use plain MII. */
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* RX contexts carry the flow-control enable; refresh them. */
	bnx2_init_all_rx_contexts(bp);
}
1377
1378 static void
1379 bnx2_enable_bmsr1(struct bnx2 *bp)
1380 {
1381         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1382             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1383                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1384                                MII_BNX2_BLK_ADDR_GP_STATUS);
1385 }
1386
1387 static void
1388 bnx2_disable_bmsr1(struct bnx2 *bp)
1389 {
1390         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1391             (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1392                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1393                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1394 }
1395
/* Ensure 2.5G advertisement is enabled on a 2.5G-capable PHY.
 * On the 5709, the UP1 register sits in the OVER1G block, so the block
 * address is switched for the access and restored afterwards.
 *
 * Returns 1 if 2.5G was already enabled, 0 if it had to be enabled
 * here (or if the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1424
/* Clear the 2.5G advertisement bit on a 2.5G-capable PHY (5709 needs
 * the block address switched to OVER1G for the UP1 access).
 *
 * Returns 1 if the bit was set and had to be cleared here, 0 if it was
 * already clear (or the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1450
/* Force the PHY to 2.5G operation.  The mechanism is chip-specific:
 * 5709 sets the force bits in the SERDES_DIG MISC1 register (via the
 * SERDES_DIG block address), 5708 sets a bit in its BMCR; other chips
 * are not supported and return early.  When autoneg speed was enabled,
 * autoneg is turned off in BMCR since the speed is now forced.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Could not read BMCR: leave the PHY untouched. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1494
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits (5709: SERDES_DIG MISC1; 5708: BMCR) and, when autoneg speed is
 * configured, re-enable and restart autonegotiation at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	/* Could not read BMCR: leave the PHY untouched. */
	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1533
/* Force the 5706 SerDes link down (start != 0 releases the force,
 * start == 0 applies it) via a read-modify-write of the SERDES_CTL
 * expansion register through the DSP access registers.  The 0xff0f /
 * 0xc0 masks are vendor-specified — exact bit meanings not documented
 * here (TODO: confirm against PHY docs).
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1546
/* Re-evaluate the PHY link state and bring the software/MAC state in
 * line with it: read (and for 5706 SerDes, qualify) link status,
 * decode speed/duplex via the chip-specific linkup helper, resolve
 * flow control, report any state change, and reprogram the MAC.
 * Always returns 0.  Caller holds bp->phy_lock (convention of the
 * surrounding PHY helpers — not asserted here).
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY: firmware manages the link, nothing to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* mii_bmsr1 may live in another register bank on 5709 SerDes;
	 * the enable/disable pair handles the bank switch.  The double
	 * read is presumably to refresh latched status bits.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* Cross-check the BMSR link bit against the EMAC link
		 * status and the AN debug shadow register (read twice).
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leaving parallel-detect: re-enable autonegotiation. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1630
/* Reset the PHY via BMCR and poll until the self-clearing reset bit
 * drops, up to PHY_RESET_MAX_WAIT * 10us.  Returns 0 on success or
 * -EBUSY if the PHY never came out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Small settle delay after reset completes. */
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1654
1655 static u32
1656 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1657 {
1658         u32 adv = 0;
1659
1660         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1661                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1662
1663                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1664                         adv = ADVERTISE_1000XPAUSE;
1665                 }
1666                 else {
1667                         adv = ADVERTISE_PAUSE_CAP;
1668                 }
1669         }
1670         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1671                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1672                         adv = ADVERTISE_1000XPSE_ASYM;
1673                 }
1674                 else {
1675                         adv = ADVERTISE_PAUSE_ASYM;
1676                 }
1677         }
1678         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1679                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1680                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1681                 }
1682                 else {
1683                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1684                 }
1685         }
1686         return adv;
1687 }
1688
1689 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1690
/* Configure the link through the remote-PHY firmware interface: build
 * the speed/duplex/pause argument word, write it to the DRV_MB_ARG0
 * shmem slot, and issue the SET_LINK firmware command.  bp->phy_lock
 * is dropped around bnx2_fw_sync() (see the sparse annotations) and
 * reacquired before returning.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every configured speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: request exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* fw_sync may sleep/poll; do not hold the phy_lock across it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1749
/* Configure the SerDes (fibre) PHY for either forced speed/duplex or
 * autonegotiation, per bp->autoneg, bp->req_* and bp->advertising.
 * Remote-PHY capable devices are delegated to the firmware instead.
 * Called with phy_lock held; the lock is dropped around the msleep()
 * used to make a forced link-down visible to the link partner.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* On remote-PHY devices the management firmware owns the PHY */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of forced 2.5G vs 1G mode */
		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		/* Advertisement unchanged and autoneg already enabled:
		 * just re-resolve flow control and reprogram the MAC.
		 */
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1866
/* All fibre speeds this device can advertise; includes 2.5G only when
 * the PHY reports 2.5G capability.  NOTE: expands using a local 'bp'.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertised by default: 10/100 half+full, 1000 full */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement bits for all 10/100 speeds plus the CSMA selector */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control advertisement bits (half and full duplex) */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1881
1882 static void
1883 bnx2_set_default_remote_link(struct bnx2 *bp)
1884 {
1885         u32 link;
1886
1887         if (bp->phy_port == PORT_TP)
1888                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1889         else
1890                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1891
1892         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1893                 bp->req_line_speed = 0;
1894                 bp->autoneg |= AUTONEG_SPEED;
1895                 bp->advertising = ADVERTISED_Autoneg;
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1897                         bp->advertising |= ADVERTISED_10baseT_Half;
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1899                         bp->advertising |= ADVERTISED_10baseT_Full;
1900                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1901                         bp->advertising |= ADVERTISED_100baseT_Half;
1902                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1903                         bp->advertising |= ADVERTISED_100baseT_Full;
1904                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1905                         bp->advertising |= ADVERTISED_1000baseT_Full;
1906                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1907                         bp->advertising |= ADVERTISED_2500baseX_Full;
1908         } else {
1909                 bp->autoneg = 0;
1910                 bp->advertising = 0;
1911                 bp->req_duplex = DUPLEX_FULL;
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1913                         bp->req_line_speed = SPEED_10;
1914                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1915                                 bp->req_duplex = DUPLEX_HALF;
1916                 }
1917                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1918                         bp->req_line_speed = SPEED_100;
1919                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1920                                 bp->req_duplex = DUPLEX_HALF;
1921                 }
1922                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1923                         bp->req_line_speed = SPEED_1000;
1924                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1925                         bp->req_line_speed = SPEED_2500;
1926         }
1927 }
1928
1929 static void
1930 bnx2_set_default_link(struct bnx2 *bp)
1931 {
1932         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1933                 bnx2_set_default_remote_link(bp);
1934                 return;
1935         }
1936
1937         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1938         bp->req_line_speed = 0;
1939         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1940                 u32 reg;
1941
1942                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1943
1944                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1945                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1946                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1947                         bp->autoneg = 0;
1948                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1949                         bp->req_duplex = DUPLEX_FULL;
1950                 }
1951         } else
1952                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1953 }
1954
1955 static void
1956 bnx2_send_heart_beat(struct bnx2 *bp)
1957 {
1958         u32 msg;
1959         u32 addr;
1960
1961         spin_lock(&bp->indirect_lock);
1962         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1963         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1964         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1965         BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1966         spin_unlock(&bp->indirect_lock);
1967 }
1968
/* Handle a link event posted by the management firmware (remote PHY).
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * line speed, duplex and flow control, switches the reported port
 * type if it changed, and reprograms the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware asked for a fresh driver pulse */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases set duplex then fall through to the
		 * matching full-duplex case to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				fallthrough;
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not fully autonegotiated: honor the
			 * requested setting, but only on full-duplex links.
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Port type flipped (TP <-> fibre): reload link defaults */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2049
2050 static int
2051 bnx2_set_remote_link(struct bnx2 *bp)
2052 {
2053         u32 evt_code;
2054
2055         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2056         switch (evt_code) {
2057                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2058                         bnx2_remote_phy_event(bp);
2059                         break;
2060                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2061                 default:
2062                         bnx2_send_heart_beat(bp);
2063                         break;
2064         }
2065         return 0;
2066 }
2067
/* Configure the copper PHY: program the MII advertisement registers
 * and either (re)start autonegotiation or force the requested
 * speed/duplex.  Called with phy_lock held; the lock is dropped
 * around the msleep() used to let a forced link-down settle.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	/* Keep only the 10/100 and pause bits of the current adv word */
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only rewrite the adv registers and restart autoneg when
		 * something actually changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low per IEEE 802.3; read
		 * twice so the second read reflects the current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2159
2160 static int
2161 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2162 __releases(&bp->phy_lock)
2163 __acquires(&bp->phy_lock)
2164 {
2165         if (bp->loopback == MAC_LOOPBACK)
2166                 return 0;
2167
2168         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2169                 return bnx2_setup_serdes_phy(bp, port);
2170         }
2171         else {
2172                 return bnx2_setup_copper_phy(bp);
2173         }
2174 }
2175
/* Initialize the 5709 SerDes PHY.  Its registers live in banks that
 * are selected via MII_BNX2_BLK_ADDR before each group of accesses,
 * and the standard MII register offsets are shifted by 0x10.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Remap the cached MII register offsets to the 5709S layout */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode and disable auto media detection */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable the 2.5G bit per the PHY capability flag */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Set the T2 and BAM next-page control bits */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected for normal MII access */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2225
/* Initialize the 5708 SerDes PHY: fiber mode with auto-detect and
 * PLL early-detect, optional 2.5G advertisement, a TX amplitude fix
 * for early chip revisions, and a TXCTL3 value from the NVRAM config
 * applied on backplane boards.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions (A0/B0/B1) need a TX amplitude tweak */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Non-zero TXCTL3 in the port hardware config is written to the
	 * PHY, but only on boards flagged as backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2283
/* Initialize the 5706 SerDes PHY.  The raw 0x18/0x1c accesses program
 * vendor shadow registers; the magic values differ for jumbo
 * (mtu > 1500) vs standard frame sizes and are not documented here.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > ETH_DATA_LEN) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2321
/* Initialize the copper (TP) PHY: apply flagged fixups, set or clear
 * the extended packet length bit depending on MTU, and enable
 * ethernet@wirespeed (plus auto-MDIX on the 5709).  The 0x10, 0x15,
 * 0x17, 0x18 and 0x1c accesses are vendor shadow registers with
 * vendor-provided magic values.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Fixup write sequence for PHYs flagged BNX2_PHY_FLAG_CRC_FIX */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 to disable early DAC */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > ETH_DATA_LEN) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;

	/* auto-mdix */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;

	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
	return 0;
}
2379
2380
2381 static int
2382 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2383 __releases(&bp->phy_lock)
2384 __acquires(&bp->phy_lock)
2385 {
2386         u32 val;
2387         int rc = 0;
2388
2389         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2390         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2391
2392         bp->mii_bmcr = MII_BMCR;
2393         bp->mii_bmsr = MII_BMSR;
2394         bp->mii_bmsr1 = MII_BMSR;
2395         bp->mii_adv = MII_ADVERTISE;
2396         bp->mii_lpa = MII_LPA;
2397
2398         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2399
2400         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2401                 goto setup_phy;
2402
2403         bnx2_read_phy(bp, MII_PHYSID1, &val);
2404         bp->phy_id = val << 16;
2405         bnx2_read_phy(bp, MII_PHYSID2, &val);
2406         bp->phy_id |= val & 0xffff;
2407
2408         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2409                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2410                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2411                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2412                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2413                 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2414                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2415         }
2416         else {
2417                 rc = bnx2_init_copper_phy(bp, reset_phy);
2418         }
2419
2420 setup_phy:
2421         if (!rc)
2422                 rc = bnx2_setup_phy(bp, bp->phy_port);
2423
2424         return rc;
2425 }
2426
2427 static int
2428 bnx2_set_mac_loopback(struct bnx2 *bp)
2429 {
2430         u32 mac_mode;
2431
2432         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2433         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2434         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2435         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2436         bp->link_up = 1;
2437         return 0;
2438 }
2439
2440 static int bnx2_test_link(struct bnx2 *);
2441
2442 static int
2443 bnx2_set_phy_loopback(struct bnx2 *bp)
2444 {
2445         u32 mac_mode;
2446         int rc, i;
2447
2448         spin_lock_bh(&bp->phy_lock);
2449         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2450                             BMCR_SPEED1000);
2451         spin_unlock_bh(&bp->phy_lock);
2452         if (rc)
2453                 return rc;
2454
2455         for (i = 0; i < 10; i++) {
2456                 if (bnx2_test_link(bp) == 0)
2457                         break;
2458                 msleep(100);
2459         }
2460
2461         mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2462         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2463                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2464                       BNX2_EMAC_MODE_25G_MODE);
2465
2466         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2467         BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2468         bp->link_up = 1;
2469         return 0;
2470 }
2471
/* Dump management CPU (MCP) registers and bootcode shared-memory
 * state to the kernel log for firmware debugging.  Read-only.
 */
static void
bnx2_dump_mcp_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "<--- start MCP states dump --->\n");
	/* The MCP state registers live at different addresses on 5709 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
	/* PC is read twice on purpose — presumably so two differing
	 * samples show whether the MCP is still executing; confirm
	 * against Broadcom debugging practice before changing.
	 */
	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
	netdev_err(dev, "DEBUG: shmem states:\n");
	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
		   bnx2_shmem_rd(bp, BNX2_FW_MB),
		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
	pr_cont(" condition[%08x]\n",
		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
	DP_SHMEM_LINE(bp, 0x3cc);
	DP_SHMEM_LINE(bp, 0x3dc);
	DP_SHMEM_LINE(bp, 0x3ec);
	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
	netdev_err(dev, "<--- end MCP states dump --->\n");
}
2514
/* Post a message to the bootcode firmware through the driver mailbox and,
 * if @ack is set, poll for the firmware's acknowledgement.
 *
 * @msg_data: message code/data; a rolling sequence number is OR'd in
 * @ack:      if zero, fire-and-forget and return 0 immediately
 * @silent:   suppress the error print and MCP state dump on timeout
 *
 * Returns 0 on success (or for WAIT0 messages regardless of ack status),
 * -EBUSY if the firmware never acked within BNX2_FW_ACK_TIME_OUT_MS, or
 * -EIO if it acked with a non-OK status.  Sleeps; may not be called from
 * atomic context when @ack is set.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack in
	 * BNX2_FW_MB can be matched to this particular request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;
	bp->fw_last_msg = msg_data;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages succeed without a status check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
		if (!silent) {
			pr_err("fw sync timeout, reset code = %x\n", msg_data);
			bnx2_dump_mcp_state(bp);
		}

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2561
/* Initialize the 5709's host-memory-backed context: kick the hardware
 * MEM_INIT sequence, then program the host page table with each
 * pre-allocated context block page.
 * Returns 0 on success, -EBUSY if the chip stops responding, or -ENOMEM
 * if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context engine and start memory init; the page size
	 * is encoded as (log2(page size) - 8) in bits 16+.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BNX2_PAGE_BITS - 8) << 16;
	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll (up to ~20us) for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of context page i into the
		 * host page table and request the table write.
		 */
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(bp->ctx_blk_mapping[i] & 0xffffffff) |
			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) bp->ctx_blk_mapping[i] >> 32);
		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request bit to self-clear. */
		for (j = 0; j < 10; j++) {

			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2609
/* Zero the on-chip context memory for all 96 connection IDs (pre-5709
 * chips; the 5709 uses host memory via bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps some VCIDs to a different physical
			 * CID - presumably a silicon workaround; TODO
			 * confirm against the A0 errata.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map and clear each physical page of this CID's context
		 * through the context window.  Note the addresses are
		 * advanced with "+= (i << PHY_CTX_SHIFT)" rather than a
		 * fixed stride; kept as-is.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2652
2653 static int
2654 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2655 {
2656         u16 *good_mbuf;
2657         u32 good_mbuf_cnt;
2658         u32 val;
2659
2660         good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2661         if (!good_mbuf)
2662                 return -ENOMEM;
2663
2664         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2665                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2666
2667         good_mbuf_cnt = 0;
2668
2669         /* Allocate a bunch of mbufs and save the good ones in an array. */
2670         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2671         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2672                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2673                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2674
2675                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2676
2677                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2678
2679                 /* The addresses with Bit 9 set are bad memory blocks. */
2680                 if (!(val & (1 << 9))) {
2681                         good_mbuf[good_mbuf_cnt] = (u16) val;
2682                         good_mbuf_cnt++;
2683                 }
2684
2685                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2686         }
2687
2688         /* Free the good ones back to the mbuf pool thus discarding
2689          * all the bad ones. */
2690         while (good_mbuf_cnt) {
2691                 good_mbuf_cnt--;
2692
2693                 val = good_mbuf[good_mbuf_cnt];
2694                 val = (val << 9) | val | 1;
2695
2696                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2697         }
2698         kfree(good_mbuf);
2699         return 0;
2700 }
2701
2702 static void
2703 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2704 {
2705         u32 val;
2706
2707         val = (mac_addr[0] << 8) | mac_addr[1];
2708
2709         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2710
2711         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2712                 (mac_addr[4] << 8) | mac_addr[5];
2713
2714         BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2715 }
2716
2717 static inline int
2718 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2719 {
2720         dma_addr_t mapping;
2721         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2722         struct bnx2_rx_bd *rxbd =
2723                 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2724         struct page *page = alloc_page(gfp);
2725
2726         if (!page)
2727                 return -ENOMEM;
2728         mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2729                                PCI_DMA_FROMDEVICE);
2730         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2731                 __free_page(page);
2732                 return -EIO;
2733         }
2734
2735         rx_pg->page = page;
2736         dma_unmap_addr_set(rx_pg, mapping, mapping);
2737         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2738         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2739         return 0;
2740 }
2741
2742 static void
2743 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2744 {
2745         struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2746         struct page *page = rx_pg->page;
2747
2748         if (!page)
2749                 return;
2750
2751         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2752                        PAGE_SIZE, PCI_DMA_FROMDEVICE);
2753
2754         __free_page(page);
2755         rx_pg->page = NULL;
2756 }
2757
2758 static inline int
2759 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2760 {
2761         u8 *data;
2762         struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2763         dma_addr_t mapping;
2764         struct bnx2_rx_bd *rxbd =
2765                 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2766
2767         data = kmalloc(bp->rx_buf_size, gfp);
2768         if (!data)
2769                 return -ENOMEM;
2770
2771         mapping = dma_map_single(&bp->pdev->dev,
2772                                  get_l2_fhdr(data),
2773                                  bp->rx_buf_use_size,
2774                                  PCI_DMA_FROMDEVICE);
2775         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2776                 kfree(data);
2777                 return -EIO;
2778         }
2779
2780         rx_buf->data = data;
2781         dma_unmap_addr_set(rx_buf, mapping, mapping);
2782
2783         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2784         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2785
2786         rxr->rx_prod_bseq += bp->rx_buf_use_size;
2787
2788         return 0;
2789 }
2790
2791 static int
2792 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2793 {
2794         struct status_block *sblk = bnapi->status_blk.msi;
2795         u32 new_link_state, old_link_state;
2796         int is_set = 1;
2797
2798         new_link_state = sblk->status_attn_bits & event;
2799         old_link_state = sblk->status_attn_bits_ack & event;
2800         if (new_link_state != old_link_state) {
2801                 if (new_link_state)
2802                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2803                 else
2804                         BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2805         } else
2806                 is_set = 0;
2807
2808         return is_set;
2809 }
2810
/* Service PHY attention events (link state change, remote-link timer
 * abort) under the phy_lock.  Called from NAPI context, hence the
 * non-_bh spin_lock.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2824
2825 static inline u16
2826 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2827 {
2828         u16 cons;
2829
2830         cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2831
2832         if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2833                 cons++;
2834         return cons;
2835 }
2836
/* Reclaim completed tx descriptors for the ring owned by @bnapi, up to
 * @budget packets: unmap the DMA buffers, free the skbs, update the BQL
 * queue state, and wake the tx queue if it was stopped and enough
 * descriptors are now free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	unsigned int tx_bytes = 0;
	struct netdev_queue *txq;

	/* One tx queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct bnx2_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every BD of the GSO packet,
			 * including the trailing one, has completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's BD. */
		for (i = 0; i < last; i++) {
			struct bnx2_sw_tx_bd *tx_buf;

			sw_cons = BNX2_NEXT_TX_BD(sw_cons);

			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
			dma_unmap_page(&bp->pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[i]),
				PCI_DMA_TODEVICE);
		}

		sw_cons = BNX2_NEXT_TX_BD(sw_cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up completions that arrived
		 * while we were working.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent queue stop.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2931
/* Recycle @count rx page-ring entries from the consumer side back to the
 * producer side without allocating anything new.  If @skb is non-NULL,
 * its last frag page is first taken back into the ring and the skb is
 * freed (see the comment below).
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
	struct bnx2_rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = BNX2_RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
						[BNX2_RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
						[BNX2_RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the descriptor
		 * address from the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2989
/* Recycle an rx data buffer from ring slot @cons to slot @prod without
 * allocating a replacement: re-sync the header area for the device, move
 * the software buffer pointer and DMA mapping, and copy the hardware
 * descriptor address.  Advances the producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		   u8 *data, u16 cons, u16 prod)
{
	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
	struct bnx2_rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area (synced for the CPU in bnx2_rx_int())
	 * back to the device.
	 */
	dma_sync_single_for_device(&bp->pdev->dev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->data = data;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
3019
/* Build an skb around the received buffer @data.
 *
 * @len:      packet length from the l2_fhdr (the 4-byte CRC is handled
 *            internally; the caller has already subtracted it from @len)
 * @hdr_len:  if non-zero, only the first @hdr_len bytes are in @data and
 *            the remainder lives in rx page-ring pages attached as frags
 * @dma_addr: DMA mapping of @data
 * @ring_idx: consumer index in the upper 16 bits, producer index in the
 *            lower 16 bits
 *
 * A replacement rx buffer is allocated up front; on any failure the
 * original buffer (and any pages) are recycled and NULL is returned.
 */
static struct sk_buff *
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;
	struct sk_buff *skb;

	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
error:
		/* raw_len re-adds the CRC so the page count matches what
		 * the hardware actually placed in the page ring.
		 */
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return NULL;
	}

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		goto error;
	}
	/* Reserve headroom up to the start of the packet payload. */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	if (hdr_len == 0) {
		skb_put(skb, len);
		return skb;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct bnx2_sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size includes the CRC; it is trimmed off the last
		 * fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* If only (part of) the CRC remains, recycle the
			 * rest of the pages and trim the tail bytes that
			 * were already attached to the skb.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					skb_frag_size_sub(frag, tail);
					skb->data_len -= tail;
				}
				return skb;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 BNX2_RX_PG_RING_IDX(pg_prod),
						 GFP_ATOMIC);
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return NULL;
			}

			dma_unmap_page(&bp->pdev->dev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += PAGE_SIZE;
			skb->len += frag_len;

			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return skb;
}
3124
3125 static inline u16
3126 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3127 {
3128         u16 cons;
3129
3130         cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3131
3132         if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3133                 cons++;
3134         return cons;
3135 }
3136
/* NAPI rx handler: process up to @budget received packets for the ring
 * owned by @bnapi.  Handles error frames, copy-break for small packets,
 * jumbo/split packets spanning the page ring, VLAN tag extraction, and
 * checksum/RSS offload flags.  Returns the number of packets consumed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	if (budget <= 0)
		return rx_pkt;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u8 *data;
		u16 next_ring_idx;

		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		data = rx_buf->data;
		rx_buf->data = NULL;

		rx_hdr = get_l2_fhdr(data);
		prefetch(rx_hdr);

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the l2_fhdr plus the copy-break region for
		 * the CPU; the full buffer is unmapped later if needed.
		 */
		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* Warm the cache for the next entry's header. */
		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
		prefetch(get_l2_fhdr(next_rx_buf->data));

		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* A non-zero hdr_len means the rest of the packet is in
		 * page-ring pages.  For split frames the header length
		 * is reported in the ip_xsum field.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling their buffers and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Copy-break: copy small packets into a fresh skb
			 * and recycle the original buffer in place.
			 */
			skb = netdev_alloc_skb(bp->dev, len + 6);
			if (!skb) {
				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			memcpy(skb->data,
			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
			       len + 6);
			skb_reserve(skb, 6);
			skb_put(skb, len);

			bnx2_reuse_rx_data(bp, rxr, data,
				sw_ring_cons, sw_ring_prod);

		} else {
			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
					  (sw_ring_cons << 16) | sw_ring_prod);
			if (!skb)
				goto next_rx;
		}
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop frames longer than the MTU, except VLAN-tagged
		 * ones (protocol 0x8100 / 802.1AD).
		 */
		if (len > (bp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(0x8100) &&
		    skb->protocol != htons(ETH_P_8021AD)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Claim a good checksum only for TCP/UDP frames with no
		 * hardware-reported checksum errors.
		 */
		skb_checksum_none_assert(skb);
		if ((bp->dev->features & NETIF_F_RXCSUM) &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}
		if ((bp->dev->features & NETIF_F_RXHASH) &&
		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
		     L2_FHDR_STATUS_USE_RXHASH))
			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
				     PKT_HASH_TYPE_L3);

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
		napi_gro_receive(&bnapi->napi, skb);
		rx_pkt++;

next_rx:
		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
		sw_prod = BNX2_NEXT_RX_BD(sw_prod);

		if (rx_pkt == budget)
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer indices. */
	if (pg_ring_used)
		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	return rx_pkt;

}
3301
3302 /* MSI ISR - The only difference between this and the INTx ISR
3303  * is that the MSI interrupt is always serviced.
3304  */
3305 static irqreturn_t
3306 bnx2_msi(int irq, void *dev_instance)
3307 {
3308         struct bnx2_napi *bnapi = dev_instance;
3309         struct bnx2 *bp = bnapi->bp;
3310
3311         prefetch(bnapi->status_blk.msi);
3312         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3313                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3314                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3315
3316         /* Return here if interrupt is disabled. */
3317         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3318                 return IRQ_HANDLED;
3319
3320         napi_schedule(&bnapi->napi);
3321
3322         return IRQ_HANDLED;
3323 }
3324
3325 static irqreturn_t
3326 bnx2_msi_1shot(int irq, void *dev_instance)
3327 {
3328         struct bnx2_napi *bnapi = dev_instance;
3329         struct bnx2 *bp = bnapi->bp;
3330
3331         prefetch(bnapi->status_blk.msi);
3332
3333         /* Return here if interrupt is disabled. */
3334         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3335                 return IRQ_HANDLED;
3336
3337         napi_schedule(&bnapi->napi);
3338
3339         return IRQ_HANDLED;
3340 }
3341
/* INTx interrupt handler (also used when the line may be shared). */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not our interrupt */

	/* Mask further chip interrupts until the NAPI poll completes. */
	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index seen at schedule time so the poll
	 * routine can detect subsequent status block updates.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3380
3381 static inline int
3382 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3383 {
3384         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3385         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3386
3387         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3388             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3389                 return 1;
3390         return 0;
3391 }
3392
/* Attention bits the driver services: link state changes and timer
 * abort events.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
3395
3396 static inline int
3397 bnx2_has_work(struct bnx2_napi *bnapi)
3398 {
3399         struct status_block *sblk = bnapi->status_blk.msi;
3400
3401         if (bnx2_has_fast_work(bnapi))
3402                 return 1;
3403
3404 #ifdef BCM_CNIC
3405         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3406                 return 1;
3407 #endif
3408
3409         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3410             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3411                 return 1;
3412
3413         return 0;
3414 }
3415
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the previous idle check, assume the MSI was
 * lost.  Toggle the MSI enable bit to rearm the chip's MSI logic, then
 * call the MSI handler directly to process the pending work.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to rearm if MSI is not enabled in hardware. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Briefly disable and re-enable MSI, then service
			 * the stalled work through the normal handler.
			 */
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3437
#ifdef BCM_CNIC
/* Hand status-block events to the registered CNIC driver, if any.
 * bp->cnic_ops is RCU-protected because the CNIC module can unregister
 * concurrently; the returned tag records the last serviced status index.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3454
/* Service pending attention events.  An event is pending when a bit in
 * STATUS_ATTN_EVENTS differs between the raw and acknowledged attention
 * words of the status block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		BNX2_WR(bp, BNX2_HC_COMMAND,
			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		BNX2_RD(bp, BNX2_HC_COMMAND);	/* read back to flush */
	}
}
3474
3475 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3476                           int work_done, int budget)
3477 {
3478         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3479         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3480
3481         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3482                 bnx2_tx_int(bp, bnapi, 0);
3483
3484         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3485                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3486
3487         return work_done;
3488 }
3489
/* NAPI poll routine for the extra MSI-X vectors: fast-path RX/TX only;
 * link attention and CNIC events are handled by bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: report the consumed status index to the
			 * chip, which re-arms this vector's interrupt.
			 */
			napi_complete_done(napi, work_done);
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3516
/* NAPI poll routine for the INTx/MSI (single vector) case.  In addition
 * to RX/TX completions it services link attention and CNIC events.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();	/* read status_idx before re-checking for work */
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete_done(napi, work_done);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack reports the
				 * consumed index and unmasks the interrupt.
				 */
				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
					bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the mask bit still set... */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
				bnapi->last_status_idx);

			/* ...then ack again without it to unmask. */
			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3565
3566 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3567  * from set_multicast.
3568  */
3569 static void
3570 bnx2_set_rx_mode(struct net_device *dev)
3571 {
3572         struct bnx2 *bp = netdev_priv(dev);
3573         u32 rx_mode, sort_mode;
3574         struct netdev_hw_addr *ha;
3575         int i;
3576
3577         if (!netif_running(dev))
3578                 return;
3579
3580         spin_lock_bh(&bp->phy_lock);
3581
3582         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3583                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3584         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3585         if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3586              (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3587                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3588         if (dev->flags & IFF_PROMISC) {
3589                 /* Promiscuous mode. */
3590                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3591                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3592                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3593         }
3594         else if (dev->flags & IFF_ALLMULTI) {
3595                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3596                         BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3597                                 0xffffffff);
3598                 }
3599                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3600         }
3601         else {
3602                 /* Accept one or more multicast(s). */
3603                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3604                 u32 regidx;
3605                 u32 bit;
3606                 u32 crc;
3607
3608                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3609
3610                 netdev_for_each_mc_addr(ha, dev) {
3611                         crc = ether_crc_le(ETH_ALEN, ha->addr);
3612                         bit = crc & 0xff;
3613                         regidx = (bit & 0xe0) >> 5;
3614                         bit &= 0x1f;
3615                         mc_filter[regidx] |= (1 << bit);
3616                 }
3617
3618                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3619                         BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3620                                 mc_filter[i]);
3621                 }
3622
3623                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3624         }
3625
3626         if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3627                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3628                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3629                              BNX2_RPM_SORT_USER0_PROM_VLAN;
3630         } else if (!(dev->flags & IFF_PROMISC)) {
3631                 /* Add all entries into to the match filter list */
3632                 i = 0;
3633                 netdev_for_each_uc_addr(ha, dev) {
3634                         bnx2_set_mac_addr(bp, ha->addr,
3635                                           i + BNX2_START_UNICAST_ADDRESS_INDEX);
3636                         sort_mode |= (1 <<
3637                                       (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3638                         i++;
3639                 }
3640
3641         }
3642
3643         if (rx_mode != bp->rx_mode) {
3644                 bp->rx_mode = rx_mode;
3645                 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3646         }
3647
3648         BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3649         BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3650         BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3651
3652         spin_unlock_bh(&bp->phy_lock);
3653 }
3654
3655 static int
3656 check_fw_section(const struct firmware *fw,
3657                  const struct bnx2_fw_file_section *section,
3658                  u32 alignment, bool non_empty)
3659 {
3660         u32 offset = be32_to_cpu(section->offset);
3661         u32 len = be32_to_cpu(section->len);
3662
3663         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3664                 return -EINVAL;
3665         if ((non_empty && len == 0) || len > fw->size - offset ||
3666             len & (alignment - 1))
3667                 return -EINVAL;
3668         return 0;
3669 }
3670
3671 static int
3672 check_mips_fw_entry(const struct firmware *fw,
3673                     const struct bnx2_mips_fw_file_entry *entry)
3674 {
3675         if (check_fw_section(fw, &entry->text, 4, true) ||
3676             check_fw_section(fw, &entry->data, 4, false) ||
3677             check_fw_section(fw, &entry->rodata, 4, false))
3678                 return -EINVAL;
3679         return 0;
3680 }
3681
3682 static void bnx2_release_firmware(struct bnx2 *bp)
3683 {
3684         if (bp->rv2p_firmware) {
3685                 release_firmware(bp->mips_firmware);
3686                 release_firmware(bp->rv2p_firmware);
3687                 bp->rv2p_firmware = NULL;
3688         }
3689 }
3690
3691 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3692 {
3693         const char *mips_fw_file, *rv2p_fw_file;
3694         const struct bnx2_mips_fw_file *mips_fw;
3695         const struct bnx2_rv2p_fw_file *rv2p_fw;
3696         int rc;
3697
3698         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3699                 mips_fw_file = FW_MIPS_FILE_09;
3700                 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3701                     (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3702                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3703                 else
3704                         rv2p_fw_file = FW_RV2P_FILE_09;
3705         } else {
3706                 mips_fw_file = FW_MIPS_FILE_06;
3707                 rv2p_fw_file = FW_RV2P_FILE_06;
3708         }
3709
3710         rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3711         if (rc) {
3712                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3713                 goto out;
3714         }
3715
3716         rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3717         if (rc) {
3718                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3719                 goto err_release_mips_firmware;
3720         }
3721         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3722         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3723         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3724             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3725             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3726             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3727             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3728             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3729                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3730                 rc = -EINVAL;
3731                 goto err_release_firmware;
3732         }
3733         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3734             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3735             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3736                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3737                 rc = -EINVAL;
3738                 goto err_release_firmware;
3739         }
3740 out:
3741         return rc;
3742
3743 err_release_firmware:
3744         release_firmware(bp->rv2p_firmware);
3745         bp->rv2p_firmware = NULL;
3746 err_release_mips_firmware:
3747         release_firmware(bp->mips_firmware);
3748         goto out;
3749 }
3750
3751 static int bnx2_request_firmware(struct bnx2 *bp)
3752 {
3753         return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3754 }
3755
3756 static u32
3757 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3758 {
3759         switch (idx) {
3760         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3761                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3762                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3763                 break;
3764         }
3765         return rv2p_code;
3766 }
3767
/* Download one RV2P processor image.  Each 8-byte firmware record is a
 * 64-bit instruction written through the INSTR_HIGH/INSTR_LOW registers
 * and committed with an address/command write.  The fixup table is then
 * applied to patch selected instructions (e.g. the BD page size), and
 * the processor is left in reset.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command bits and address register for the target
	 * processor.
	 */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the instruction stream, one 64-bit word per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;	/* instruction index + command */
		BNX2_WR(bp, addr, val);
	}

	/* Apply the fixup table: rewrite the instructions named by the
	 * (at most 8) fixup locations.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			BNX2_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3827
3828 static int
3829 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3830             const struct bnx2_mips_fw_file_entry *fw_entry)
3831 {
3832         u32 addr, len, file_offset;
3833         __be32 *data;
3834         u32 offset;
3835         u32 val;
3836
3837         /* Halt the CPU. */
3838         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3839         val |= cpu_reg->mode_value_halt;
3840         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3841         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3842
3843         /* Load the Text area. */
3844         addr = be32_to_cpu(fw_entry->text.addr);
3845         len = be32_to_cpu(fw_entry->text.len);
3846         file_offset = be32_to_cpu(fw_entry->text.offset);
3847         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3848
3849         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3850         if (len) {
3851                 int j;
3852
3853                 for (j = 0; j < (len / 4); j++, offset += 4)
3854                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3855         }
3856
3857         /* Load the Data area. */
3858         addr = be32_to_cpu(fw_entry->data.addr);
3859         len = be32_to_cpu(fw_entry->data.len);
3860         file_offset = be32_to_cpu(fw_entry->data.offset);
3861         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3862
3863         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3864         if (len) {
3865                 int j;
3866
3867                 for (j = 0; j < (len / 4); j++, offset += 4)
3868                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3869         }
3870
3871         /* Load the Read-Only area. */
3872         addr = be32_to_cpu(fw_entry->rodata.addr);
3873         len = be32_to_cpu(fw_entry->rodata.len);
3874         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3875         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3876
3877         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3878         if (len) {
3879                 int j;
3880
3881                 for (j = 0; j < (len / 4); j++, offset += 4)
3882                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3883         }
3884
3885         /* Clear the pre-fetch instruction. */
3886         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3887
3888         val = be32_to_cpu(fw_entry->start_addr);
3889         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3890
3891         /* Start the CPU. */
3892         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3893         val &= ~cpu_reg->mode_value_halt;
3894         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3895         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3896
3897         return 0;
3898 }
3899
3900 static int
3901 bnx2_init_cpus(struct bnx2 *bp)
3902 {
3903         const struct bnx2_mips_fw_file *mips_fw =
3904                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3905         const struct bnx2_rv2p_fw_file *rv2p_fw =
3906                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3907         int rc;
3908
3909         /* Initialize the RV2P processor. */
3910         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3911         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3912
3913         /* Initialize the RX Processor. */
3914         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3915         if (rc)
3916                 goto init_cpu_err;
3917
3918         /* Initialize the TX Processor. */
3919         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3920         if (rc)
3921                 goto init_cpu_err;
3922
3923         /* Initialize the TX Patch-up Processor. */
3924         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3925         if (rc)
3926                 goto init_cpu_err;
3927
3928         /* Initialize the Completion Processor. */
3929         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3930         if (rc)
3931                 goto init_cpu_err;
3932
3933         /* Initialize the Command Processor. */
3934         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3935
3936 init_cpu_err:
3937         return rc;
3938 }
3939
/* Prepare the controller for suspend: when WOL is enabled, renegotiate
 * the PHY, program the MAC for magic/ACPI packet reception and open the
 * multicast filters; in all cases tell the firmware which suspend mode
 * (WOL or no-WOL) is being entered.
 */
static void
bnx2_setup_wol(struct bnx2 *bp)
{
	int i;
	u32 val, wol_msg;

	if (bp->wol) {
		u32 advertising;
		u8 autoneg;

		/* Save the user's link settings around the WOL setup. */
		autoneg = bp->autoneg;
		advertising = bp->advertising;

		/* Copper port: autonegotiate down to 10/100 for WOL. */
		if (bp->phy_port == PORT_TP) {
			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		}

		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);

		/* Restore the user's settings. */
		bp->autoneg = autoneg;
		bp->advertising = advertising;

		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

		val = BNX2_RD(bp, BNX2_EMAC_MODE);

		/* Enable port mode. */
		val &= ~BNX2_EMAC_MODE_PORT;
		val |= BNX2_EMAC_MODE_MPKT_RCVD |
		       BNX2_EMAC_MODE_ACPI_RCVD |
		       BNX2_EMAC_MODE_MPKT;
		if (bp->phy_port == PORT_TP) {
			val |= BNX2_EMAC_MODE_PORT_MII;
		} else {
			val |= BNX2_EMAC_MODE_PORT_GMII;
			if (bp->line_speed == SPEED_2500)
				val |= BNX2_EMAC_MODE_25G_MODE;
		}

		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		/* receive all multicast */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				0xffffffff);
		}
		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);

		/* Sort filter: broadcast and multicast enabled. */
		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);

		/* Need to enable EMAC and RPM for WOL. */
		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);

		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	} else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	}

	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
		u32 val;

		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
			bnx2_fw_sync(bp, wol_msg, 1, 0);
			return;
		}
		/* Tell firmware not to power down the PHY yet, otherwise
		 * the chip will take a long time to respond to MMIO reads.
		 */
		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
		bnx2_fw_sync(bp, wol_msg, 1, 0);
		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
	}

}
4034
/* Transition the device between PCI power states.  Only D0 (fully on)
 * and D3hot (suspended, optionally WOL-armed) are supported; any other
 * state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_enable_wake(bp->pdev, PCI_D0, false);
		pci_set_power_state(bp->pdev, PCI_D0);

		/* Clear magic-packet mode (MPKT), which bnx2_setup_wol()
		 * enables for WOL.
		 */
		val = BNX2_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		BNX2_WR(bp, BNX2_EMAC_MODE, val);

		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		bnx2_setup_wol(bp);
		pci_wake_from_d3(bp->pdev, bp->wol);
		/* 5706 A0/A1: only enter D3hot when WOL is enabled. */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {

			if (bp->wol)
				pci_set_power_state(bp->pdev, PCI_D3hot);
			break;

		}
		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			u32 val;

			/* Tell firmware not to power down the PHY yet,
			 * otherwise the other port may not respond to
			 * MMIO reads.
			 */
			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			val &= ~BNX2_CONDITION_PM_STATE_MASK;
			val |= BNX2_CONDITION_PM_STATE_UNPREP;
			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
		}
		pci_set_power_state(bp->pdev, PCI_D3hot);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4090
4091 static int
4092 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4093 {
4094         u32 val;
4095         int j;
4096
4097         /* Request access to the flash interface. */
4098         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4099         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4100                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4101                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4102                         break;
4103
4104                 udelay(5);
4105         }
4106
4107         if (j >= NVRAM_TIMEOUT_COUNT)
4108                 return -EBUSY;
4109
4110         return 0;
4111 }
4112
4113 static int
4114 bnx2_release_nvram_lock(struct bnx2 *bp)
4115 {
4116         int j;
4117         u32 val;
4118
4119         /* Relinquish nvram interface. */
4120         BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4121
4122         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4123                 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4124                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4125                         break;
4126
4127                 udelay(5);
4128         }
4129
4130         if (j >= NVRAM_TIMEOUT_COUNT)
4131                 return -EBUSY;
4132
4133         return 0;
4134 }
4135
4136
4137 static int
4138 bnx2_enable_nvram_write(struct bnx2 *bp)
4139 {
4140         u32 val;
4141
4142         val = BNX2_RD(bp, BNX2_MISC_CFG);
4143         BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4144
4145         if (bp->flash_info->flags & BNX2_NV_WREN) {
4146                 int j;
4147
4148                 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4149                 BNX2_WR(bp, BNX2_NVM_COMMAND,
4150                         BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4151
4152                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4153                         udelay(5);
4154
4155                         val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4156                         if (val & BNX2_NVM_COMMAND_DONE)
4157                                 break;
4158                 }
4159
4160                 if (j >= NVRAM_TIMEOUT_COUNT)
4161                         return -EBUSY;
4162         }
4163         return 0;
4164 }
4165
4166 static void
4167 bnx2_disable_nvram_write(struct bnx2 *bp)
4168 {
4169         u32 val;
4170
4171         val = BNX2_RD(bp, BNX2_MISC_CFG);
4172         BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4173 }
4174
4175
4176 static void
4177 bnx2_enable_nvram_access(struct bnx2 *bp)
4178 {
4179         u32 val;
4180
4181         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4182         /* Enable both bits, even on read. */
4183         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4184                 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4185 }
4186
4187 static void
4188 bnx2_disable_nvram_access(struct bnx2 *bp)
4189 {
4190         u32 val;
4191
4192         val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4193         /* Disable both bits, even after read. */
4194         BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4195                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4196                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4197 }
4198
4199 static int
4200 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4201 {
4202         u32 cmd;
4203         int j;
4204
4205         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4206                 /* Buffered flash, no erase needed */
4207                 return 0;
4208
4209         /* Build an erase command */
4210         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4211               BNX2_NVM_COMMAND_DOIT;
4212
4213         /* Need to clear DONE bit separately. */
4214         BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4215
4216         /* Address of the NVRAM to read from. */
4217         BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4218
4219         /* Issue an erase command. */
4220         BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4221
4222         /* Wait for completion. */
4223         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4224                 u32 val;
4225
4226                 udelay(5);
4227
4228                 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4229                 if (val & BNX2_NVM_COMMAND_DONE)
4230                         break;
4231         }
4232
4233         if (j >= NVRAM_TIMEOUT_COUNT)
4234                 return -EBUSY;
4235
4236         return 0;
4237 }
4238
/* Read one 32-bit word of NVRAM at @offset into @ret_val.  The word is
 * stored into @ret_val in big-endian byte order.  @cmd_flags carries
 * the FIRST/LAST framing bits for multi-dword transactions.  Caller
 * must already hold the NVRAM lock with access enabled.  Returns 0 on
 * success or -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store the word big-endian; memcpy avoids any
			 * alignment assumption about ret_val.
			 */
			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4282
4283
/* Write one 32-bit word to NVRAM at @offset.  @val points to 4 bytes
 * already in big-endian (NVRAM) byte order.  @cmd_flags carries the
 * FIRST/LAST framing bits.  Caller must hold the NVRAM lock with
 * access and write enabled.  Returns 0 on success or -EBUSY on
 * timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy avoids any alignment assumption about val. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4327
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface if the strapping
 * says it has not been set up yet.  Also determines bp->flash_size.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709 always uses the one fixed flash spec. */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = BNX2_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits only. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops leave j == entry_count when nothing matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared HW config; fall back to
	 * the table's total_size when the field is zero.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4410
4411 static int
4412 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4413                 int buf_size)
4414 {
4415         int rc = 0;
4416         u32 cmd_flags, offset32, len32, extra;
4417
4418         if (buf_size == 0)
4419                 return 0;
4420
4421         /* Request access to the flash interface. */
4422         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4423                 return rc;
4424
4425         /* Enable access to flash interface */
4426         bnx2_enable_nvram_access(bp);
4427
4428         len32 = buf_size;
4429         offset32 = offset;
4430         extra = 0;
4431
4432         cmd_flags = 0;
4433
4434         if (offset32 & 3) {
4435                 u8 buf[4];
4436                 u32 pre_len;
4437
4438                 offset32 &= ~3;
4439                 pre_len = 4 - (offset & 3);
4440
4441                 if (pre_len >= len32) {
4442                         pre_len = len32;
4443                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4444                                     BNX2_NVM_COMMAND_LAST;
4445                 }
4446                 else {
4447                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4448                 }
4449
4450                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4451
4452                 if (rc)
4453                         return rc;
4454
4455                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4456
4457                 offset32 += 4;
4458                 ret_buf += pre_len;
4459                 len32 -= pre_len;
4460         }
4461         if (len32 & 3) {
4462                 extra = 4 - (len32 & 3);
4463                 len32 = (len32 + 4) & ~3;
4464         }
4465
4466         if (len32 == 4) {
4467                 u8 buf[4];
4468
4469                 if (cmd_flags)
4470                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4471                 else
4472                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4473                                     BNX2_NVM_COMMAND_LAST;
4474
4475                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4476
4477                 memcpy(ret_buf, buf, 4 - extra);
4478         }
4479         else if (len32 > 0) {
4480                 u8 buf[4];
4481
4482                 /* Read the first word. */
4483                 if (cmd_flags)
4484                         cmd_flags = 0;
4485                 else
4486                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4487
4488                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4489
4490                 /* Advance to the next dword. */
4491                 offset32 += 4;
4492                 ret_buf += 4;
4493                 len32 -= 4;
4494
4495                 while (len32 > 4 && rc == 0) {
4496                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4497
4498                         /* Advance to the next dword. */
4499                         offset32 += 4;
4500                         ret_buf += 4;
4501                         len32 -= 4;
4502                 }
4503
4504                 if (rc)
4505                         return rc;
4506
4507                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4508                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4509
4510                 memcpy(ret_buf, buf, 4 - extra);
4511         }
4512
4513         /* Disable access to flash interface */
4514         bnx2_disable_nvram_access(bp);
4515
4516         bnx2_release_nvram_lock(bp);
4517
4518         return rc;
4519 }
4520
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  Unaligned
 * edges are handled by reading the surrounding dwords and merging into
 * a kmalloc'd aligned copy.  Non-buffered flash is written a page at a
 * time: each page is read into flash_buffer, erased, and rewritten
 * with the untouched head/tail restored around the new data.  Returns
 * 0 on success or a negative errno.
 *
 * NOTE(review): the goto nvram_write_end paths taken after
 * bnx2_acquire_nvram_lock() succeeds appear to skip
 * bnx2_disable_nvram_access()/bnx2_release_nvram_lock() — looks like
 * the lock is leaked on mid-page errors; verify and fix separately.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: fetch the dword that will be partially
	 * overwritten so its leading bytes can be preserved.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: same for the trailing dword. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved edge bytes and new data into one aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (!align_buf)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch copy of each whole page. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (!flash_buffer) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4700
4701 static void
4702 bnx2_init_fw_cap(struct bnx2 *bp)
4703 {
4704         u32 val, sig = 0;
4705
4706         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4707         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4708
4709         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4710                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4711
4712         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4713         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4714                 return;
4715
4716         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4717                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4718                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4719         }
4720
4721         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4722             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4723                 u32 link;
4724
4725                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4726
4727                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4728                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4729                         bp->phy_port = PORT_FIBRE;
4730                 else
4731                         bp->phy_port = PORT_TP;
4732
4733                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4734                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4735         }
4736
4737         if (netif_running(bp->dev) && sig)
4738                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4739 }
4740
4741 static void
4742 bnx2_setup_msix_tbl(struct bnx2 *bp)
4743 {
4744         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4745
4746         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4747         BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4748 }
4749
4750 static void
4751 bnx2_wait_dma_complete(struct bnx2 *bp)
4752 {
4753         u32 val;
4754         int i;
4755
4756         /*
4757          * Wait for the current PCI transaction to complete before
4758          * issuing a reset.
4759          */
4760         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4761             (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4762                 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4763                         BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4764                         BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4765                         BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4766                         BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4767                 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4768                 udelay(5);
4769         } else {  /* 5709 */
4770                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4771                 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4772                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4773                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4774
4775                 for (i = 0; i < 100; i++) {
4776                         msleep(1);
4777                         val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4778                         if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4779                                 break;
4780                 }
4781         }
4782
4783         return;
4784 }
4785
4786
/* Perform a soft reset of the chip, coordinating with the bootcode
 * firmware before and after.  @reset_code is the BNX2_DRV_MSG_CODE_*
 * reason passed to the firmware.  Returns 0 on success or a negative
 * errno (firmware/reset timeout, bad endian config, or rbuf fixup
 * failure).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	bnx2_wait_dma_complete(bp);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = BNX2_RD(bp, BNX2_MISC_ID);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709 resets via the MISC command register; the read
		 * flushes the posted write before the delay.
		 */
		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		BNX2_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via the core-reset request bit in
		 * PCICFG_MISC_CONFIG.
		 */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; the phy_lock protects the
	 * PHY port selection against concurrent link handling.
	 */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4890
4891 static int
4892 bnx2_init_chip(struct bnx2 *bp)
4893 {
4894         u32 val, mtu;
4895         int rc, i;
4896
4897         /* Make sure the interrupt is not active. */
4898         BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4899
4900         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4901               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4902 #ifdef __BIG_ENDIAN
4903               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4904 #endif
4905               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4906               DMA_READ_CHANS << 12 |
4907               DMA_WRITE_CHANS << 16;
4908
4909         val |= (0x2 << 20) | (1 << 11);
4910
4911         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4912                 val |= (1 << 23);
4913
4914         if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4915             (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4916             !(bp->flags & BNX2_FLAG_PCIX))
4917                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4918
4919         BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4920
4921         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4922                 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4923                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4924                 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4925         }
4926
4927         if (bp->flags & BNX2_FLAG_PCIX) {
4928                 u16 val16;
4929
4930                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4931                                      &val16);
4932                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4933                                       val16 & ~PCI_X_CMD_ERO);
4934         }
4935
4936         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4937                 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4938                 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4939                 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4940
4941         /* Initialize context mapping and zero out the quick contexts.  The
4942          * context block must have already been enabled. */
4943         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4944                 rc = bnx2_init_5709_context(bp);
4945                 if (rc)
4946                         return rc;
4947         } else
4948                 bnx2_init_context(bp);
4949
4950         if ((rc = bnx2_init_cpus(bp)) != 0)
4951                 return rc;
4952
4953         bnx2_init_nvram(bp);
4954
4955         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4956
4957         val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4958         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4959         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4960         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4961                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4962                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4963                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4964         }
4965
4966         BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4967
4968         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4969         BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4970         BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4971
4972         val = (BNX2_PAGE_BITS - 8) << 24;
4973         BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4974
4975         /* Configure page size. */
4976         val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4977         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4978         val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4979         BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4980
4981         val = bp->mac_addr[0] +
4982               (bp->mac_addr[1] << 8) +
4983               (bp->mac_addr[2] << 16) +
4984               bp->mac_addr[3] +
4985               (bp->mac_addr[4] << 8) +
4986               (bp->mac_addr[5] << 16);
4987         BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4988
4989         /* Program the MTU.  Also include 4 bytes for CRC32. */
4990         mtu = bp->dev->mtu;
4991         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4992         if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4993                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4994         BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4995
4996         if (mtu < ETH_DATA_LEN)
4997                 mtu = ETH_DATA_LEN;
4998
4999         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5000         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5001         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5002
5003         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5004         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5005                 bp->bnx2_napi[i].last_status_idx = 0;
5006
5007         bp->idle_chk_status_idx = 0xffff;
5008
5009         /* Set up how to generate a link change interrupt. */
5010         BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5011
5012         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5013                 (u64) bp->status_blk_mapping & 0xffffffff);
5014         BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5015
5016         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5017                 (u64) bp->stats_blk_mapping & 0xffffffff);
5018         BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5019                 (u64) bp->stats_blk_mapping >> 32);
5020
5021         BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5022                 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5023
5024         BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5025                 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5026
5027         BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5028                 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5029
5030         BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5031
5032         BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5033
5034         BNX2_WR(bp, BNX2_HC_COM_TICKS,
5035                 (bp->com_ticks_int << 16) | bp->com_ticks);
5036
5037         BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5038                 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5039
5040         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5041                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5042         else
5043                 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5044         BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5045
5046         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5047                 val = BNX2_HC_CONFIG_COLLECT_STATS;
5048         else {
5049                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5050                       BNX2_HC_CONFIG_COLLECT_STATS;
5051         }
5052
5053         if (bp->flags & BNX2_FLAG_USING_MSIX) {
5054                 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5055                         BNX2_HC_MSIX_BIT_VECTOR_VAL);
5056
5057                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5058         }
5059
5060         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5061                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5062
5063         BNX2_WR(bp, BNX2_HC_CONFIG, val);
5064
5065         if (bp->rx_ticks < 25)
5066                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5067         else
5068                 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5069
5070         for (i = 1; i < bp->irq_nvecs; i++) {
5071                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5072                            BNX2_HC_SB_CONFIG_1;
5073
5074                 BNX2_WR(bp, base,
5075                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5076                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5077                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5078
5079                 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5080                         (bp->tx_quick_cons_trip_int << 16) |
5081                          bp->tx_quick_cons_trip);
5082
5083                 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5084                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
5085
5086                 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5087                         (bp->rx_quick_cons_trip_int << 16) |
5088                         bp->rx_quick_cons_trip);
5089
5090                 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5091                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
5092         }
5093
5094         /* Clear internal stats counters. */
5095         BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5096
5097         BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5098
5099         /* Initialize the receive filter. */
5100         bnx2_set_rx_mode(bp->dev);
5101
5102         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5103                 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5104                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5105                 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5106         }
5107         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5108                           1, 0);
5109
5110         BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5111         BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5112
5113         udelay(20);
5114
5115         bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5116
5117         return rc;
5118 }
5119
5120 static void
5121 bnx2_clear_ring_states(struct bnx2 *bp)
5122 {
5123         struct bnx2_napi *bnapi;
5124         struct bnx2_tx_ring_info *txr;
5125         struct bnx2_rx_ring_info *rxr;
5126         int i;
5127
5128         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5129                 bnapi = &bp->bnx2_napi[i];
5130                 txr = &bnapi->tx_ring;
5131                 rxr = &bnapi->rx_ring;
5132
5133                 txr->tx_cons = 0;
5134                 txr->hw_tx_cons = 0;
5135                 rxr->rx_prod_bseq = 0;
5136                 rxr->rx_prod = 0;
5137                 rxr->rx_cons = 0;
5138                 rxr->rx_pg_prod = 0;
5139                 rxr->rx_pg_cons = 0;
5140         }
5141 }
5142
5143 static void
5144 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5145 {
5146         u32 val, offset0, offset1, offset2, offset3;
5147         u32 cid_addr = GET_CID_ADDR(cid);
5148
5149         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5150                 offset0 = BNX2_L2CTX_TYPE_XI;
5151                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5152                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5153                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5154         } else {
5155                 offset0 = BNX2_L2CTX_TYPE;
5156                 offset1 = BNX2_L2CTX_CMD_TYPE;
5157                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5158                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5159         }
5160         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5161         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5162
5163         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5164         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5165
5166         val = (u64) txr->tx_desc_mapping >> 32;
5167         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5168
5169         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5170         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5171 }
5172
5173 static void
5174 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5175 {
5176         struct bnx2_tx_bd *txbd;
5177         u32 cid = TX_CID;
5178         struct bnx2_napi *bnapi;
5179         struct bnx2_tx_ring_info *txr;
5180
5181         bnapi = &bp->bnx2_napi[ring_num];
5182         txr = &bnapi->tx_ring;
5183
5184         if (ring_num == 0)
5185                 cid = TX_CID;
5186         else
5187                 cid = TX_TSS_CID + ring_num - 1;
5188
5189         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5190
5191         txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5192
5193         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5194         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5195
5196         txr->tx_prod = 0;
5197         txr->tx_prod_bseq = 0;
5198
5199         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5200         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5201
5202         bnx2_init_tx_context(bp, cid, txr);
5203 }
5204
5205 static void
5206 bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5207                      u32 buf_size, int num_rings)
5208 {
5209         int i;
5210         struct bnx2_rx_bd *rxbd;
5211
5212         for (i = 0; i < num_rings; i++) {
5213                 int j;
5214
5215                 rxbd = &rx_ring[i][0];
5216                 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5217                         rxbd->rx_bd_len = buf_size;
5218                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5219                 }
5220                 if (i == (num_rings - 1))
5221                         j = 0;
5222                 else
5223                         j = i + 1;
5224                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5225                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5226         }
5227 }
5228
/* Set up the software state, hardware context and initial buffer fill
 * for one RX ring (and its optional page ring used for jumbo frames).
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; RSS rings get consecutive CIDs. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the RX BD pages into a circular ring. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring; overwritten below when jumbo pages are
	 * in use.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo path: build the page BD chain and program its
		 * buffer sizes, key and DMA base into the context.
		 */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Program the normal RX BD chain's DMA base address. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning rather than treated as fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the data ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Fast-path mailbox addresses for producer updates. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware about the buffers just posted. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5314
/* Initialize every TX and RX ring and, when multiple rings are in
 * use, program the TSS configuration and the RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
			(TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are (re)built. */
	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		/* Build the RSS indirection table: each entry is a
		 * 4-bit ring index, packed eight entries per 32-bit
		 * word and written to the hardware one word at a time.
		 * Entries cycle over the non-default RX rings.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
					BNX2_RLUP_RSS_COMMAND_WRITE |
					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		/* Enable RSS hashing for IPv4 and IPv6. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5361
5362 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5363 {
5364         u32 max, num_rings = 1;
5365
5366         while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5367                 ring_size -= BNX2_MAX_RX_DESC_CNT;
5368                 num_rings++;
5369         }
5370         /* round to next power of 2 */
5371         max = max_size;
5372         while ((max & num_rings) == 0)
5373                 max >>= 1;
5374
5375         if (num_rings != max)
5376                 max <<= 1;
5377
5378         return max;
5379 }
5380
/* Compute all RX buffer and ring sizing parameters for the requested
 * ring size and the current MTU, switching to the page-ring (jumbo)
 * scheme when a single buffer would not fit in one page.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total per-buffer footprint including alignment and the
	 * skb_shared_info build_skb() places at the end.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Jumbo mode: headers go into a small first buffer and
		 * the payload is spread across page-sized buffers.
		 * 40 is subtracted for the headers kept in the first
		 * buffer; TODO confirm exact derivation of the constant.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							BNX2_MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx =
			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment + build_skb() overhead*/
	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
}
5421
/* Unmap and free every pending skb on all TX rings.  Called with the
 * chip quiesced, so no TX completions race with this walk.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (!txr->tx_buf_ring)
			continue;

		/* j is advanced inside the body: one BD for the head
		 * plus one per fragment, mirroring how the skb was
		 * originally posted to the ring.
		 */
		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j = BNX2_NEXT_TX_BD(j);
				continue;
			}

			/* Unmap the linear part of the skb first. */
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then unmap each page fragment's BD. */
			last = tx_buf->nr_frags;
			j = BNX2_NEXT_TX_BD(j);
			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(&skb_shinfo(skb)->frags[k]),
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		/* Reset BQL accounting for this queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
5466
/* Unmap and free every posted RX data buffer and RX page on all RX
 * rings.  Called with the chip quiesced.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): this bails out of the whole function,
		 * unlike bnx2_free_tx_skbs() which uses continue.  All
		 * rings are allocated together so a NULL here implies
		 * the rest are NULL too — presumably intentional.
		 */
		if (!rxr->rx_buf_ring)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			u8 *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->data = NULL;

			kfree(data);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5500
/* Release every driver-owned TX and RX buffer. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5507
5508 static int
5509 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5510 {
5511         int rc;
5512
5513         rc = bnx2_reset_chip(bp, reset_code);
5514         bnx2_free_skbs(bp);
5515         if (rc)
5516                 return rc;
5517
5518         if ((rc = bnx2_init_chip(bp)) != 0)
5519                 return rc;
5520
5521         bnx2_init_all_rings(bp);
5522         return 0;
5523 }
5524
5525 static int
5526 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5527 {
5528         int rc;
5529
5530         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5531                 return rc;
5532
5533         spin_lock_bh(&bp->phy_lock);
5534         bnx2_init_phy(bp, reset_phy);
5535         bnx2_set_link(bp);
5536         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5537                 bnx2_remote_phy_event(bp);
5538         spin_unlock_bh(&bp->phy_lock);
5539         return 0;
5540 }
5541
5542 static int
5543 bnx2_shutdown_chip(struct bnx2 *bp)
5544 {
5545         u32 reset_code;
5546
5547         if (bp->flags & BNX2_FLAG_NO_WOL)
5548                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5549         else if (bp->wol)
5550                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5551         else
5552                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5553
5554         return bnx2_reset_chip(bp, reset_code);
5555 }
5556
/* Ethtool register self-test.  For each register in the table, verify
 * that writable bits (rw_mask) can be cleared and set, and that
 * read-only bits (ro_mask) are unaffected by writes.  The original
 * value is restored in all cases.  Returns 0 on success, -ENODEV on
 * the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* offset: register offset; flags: chip applicability;
	 * rw_mask: bits that must be writable;
	 * ro_mask: bits that must be read-only.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that don't exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: writable bits must read back 0 and
		 * read-only bits must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: writable bits must read back 1 and
		 * read-only bits must still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5727
5728 static int
5729 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5730 {
5731         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5732                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5733         int i;
5734
5735         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5736                 u32 offset;
5737
5738                 for (offset = 0; offset < size; offset += 4) {
5739
5740                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5741
5742                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5743                                 test_pattern[i]) {
5744                                 return -ENODEV;
5745                         }
5746                 }
5747         }
5748         return 0;
5749 }
5750
5751 static int
5752 bnx2_test_memory(struct bnx2 *bp)
5753 {
5754         int ret = 0;
5755         int i;
5756         static struct mem_entry {
5757                 u32   offset;
5758                 u32   len;
5759         } mem_tbl_5706[] = {
5760                 { 0x60000,  0x4000 },
5761                 { 0xa0000,  0x3000 },
5762                 { 0xe0000,  0x4000 },
5763                 { 0x120000, 0x4000 },
5764                 { 0x1a0000, 0x4000 },
5765                 { 0x160000, 0x4000 },
5766                 { 0xffffffff, 0    },
5767         },
5768         mem_tbl_5709[] = {
5769                 { 0x60000,  0x4000 },
5770                 { 0xa0000,  0x3000 },
5771                 { 0xe0000,  0x4000 },
5772                 { 0x120000, 0x4000 },
5773                 { 0x1a0000, 0x4000 },
5774                 { 0xffffffff, 0    },
5775         };
5776         struct mem_entry *mem_tbl;
5777
5778         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5779                 mem_tbl = mem_tbl_5709;
5780         else
5781                 mem_tbl = mem_tbl_5706;
5782
5783         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5784                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5785                         mem_tbl[i].len)) != 0) {
5786                         return ret;
5787                 }
5788         }
5789
5790         return ret;
5791 }
5792
5793 #define BNX2_MAC_LOOPBACK       0
5794 #define BNX2_PHY_LOOPBACK       1
5795
5796 static int
5797 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5798 {
5799         unsigned int pkt_size, num_pkts, i;
5800         struct sk_buff *skb;
5801         u8 *data;
5802         unsigned char *packet;
5803         u16 rx_start_idx, rx_idx;
5804         dma_addr_t map;
5805         struct bnx2_tx_bd *txbd;
5806         struct bnx2_sw_bd *rx_buf;
5807         struct l2_fhdr *rx_hdr;
5808         int ret = -ENODEV;
5809         struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5810         struct bnx2_tx_ring_info *txr;
5811         struct bnx2_rx_ring_info *rxr;
5812
5813         tx_napi = bnapi;
5814
5815         txr = &tx_napi->tx_ring;
5816         rxr = &bnapi->rx_ring;
5817         if (loopback_mode == BNX2_MAC_LOOPBACK) {
5818                 bp->loopback = MAC_LOOPBACK;
5819                 bnx2_set_mac_loopback(bp);
5820         }
5821         else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5822                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5823                         return 0;
5824
5825                 bp->loopback = PHY_LOOPBACK;
5826                 bnx2_set_phy_loopback(bp);
5827         }
5828         else
5829                 return -EINVAL;
5830
5831         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5832         skb = netdev_alloc_skb(bp->dev, pkt_size);
5833         if (!skb)
5834                 return -ENOMEM;
5835         packet = skb_put(skb, pkt_size);
5836         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5837         memset(packet + ETH_ALEN, 0x0, 8);
5838         for (i = 14; i < pkt_size; i++)
5839                 packet[i] = (unsigned char) (i & 0xff);
5840
5841         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5842                              PCI_DMA_TODEVICE);
5843         if (dma_mapping_error(&bp->pdev->dev, map)) {
5844                 dev_kfree_skb(skb);
5845                 return -EIO;
5846         }
5847
5848         BNX2_WR(bp, BNX2_HC_COMMAND,
5849                 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5850
5851         BNX2_RD(bp, BNX2_HC_COMMAND);
5852
5853         udelay(5);
5854         rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5855
5856         num_pkts = 0;
5857
5858         txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5859
5860         txbd->tx_bd_haddr_hi = (u64) map >> 32;
5861         txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5862         txbd->tx_bd_mss_nbytes = pkt_size;
5863         txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5864
5865         num_pkts++;
5866         txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5867         txr->tx_prod_bseq += pkt_size;
5868
5869         BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5870         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5871
5872         udelay(100);
5873
5874         BNX2_WR(bp, BNX2_HC_COMMAND,
5875                 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5876
5877         BNX2_RD(bp, BNX2_HC_COMMAND);
5878
5879         udelay(5);
5880
5881         dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5882         dev_kfree_skb(skb);
5883
5884         if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5885                 goto loopback_test_done;
5886
5887         rx_idx = bnx2_get_hw_rx_cons(bnapi);
5888         if (rx_idx != rx_start_idx + num_pkts) {
5889                 goto loopback_test_done;
5890         }
5891
5892         rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5893         data = rx_buf->data;
5894
5895         rx_hdr = get_l2_fhdr(data);
5896         data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5897
5898         dma_sync_single_for_cpu(&bp->pdev->dev,
5899                 dma_unmap_addr(rx_buf, mapping),
5900                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
5901
5902         if (rx_hdr->l2_fhdr_status &
5903                 (L2_FHDR_ERRORS_BAD_CRC |
5904                 L2_FHDR_ERRORS_PHY_DECODE |
5905                 L2_FHDR_ERRORS_ALIGNMENT |
5906                 L2_FHDR_ERRORS_TOO_SHORT |
5907                 L2_FHDR_ERRORS_GIANT_FRAME)) {
5908
5909                 goto loopback_test_done;
5910         }
5911
5912         if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5913                 goto loopback_test_done;
5914         }
5915
5916         for (i = 14; i < pkt_size; i++) {
5917                 if (*(data + i) != (unsigned char) (i & 0xff)) {
5918                         goto loopback_test_done;
5919                 }
5920         }
5921
5922         ret = 0;
5923
5924 loopback_test_done:
5925         bp->loopback = 0;
5926         return ret;
5927 }
5928
5929 #define BNX2_MAC_LOOPBACK_FAILED        1
5930 #define BNX2_PHY_LOOPBACK_FAILED        2
5931 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5932                                          BNX2_PHY_LOOPBACK_FAILED)
5933
5934 static int
5935 bnx2_test_loopback(struct bnx2 *bp)
5936 {
5937         int rc = 0;
5938
5939         if (!netif_running(bp->dev))
5940                 return BNX2_LOOPBACK_FAILED;
5941
5942         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5943         spin_lock_bh(&bp->phy_lock);
5944         bnx2_init_phy(bp, 1);
5945         spin_unlock_bh(&bp->phy_lock);
5946         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5947                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5948         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5949                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5950         return rc;
5951 }
5952
5953 #define NVRAM_SIZE 0x200
5954 #define CRC32_RESIDUAL 0xdebb20e3
5955
5956 static int
5957 bnx2_test_nvram(struct bnx2 *bp)
5958 {
5959         __be32 buf[NVRAM_SIZE / 4];
5960         u8 *data = (u8 *) buf;
5961         int rc = 0;
5962         u32 magic, csum;
5963
5964         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5965                 goto test_nvram_done;
5966
5967         magic = be32_to_cpu(buf[0]);
5968         if (magic != 0x669955aa) {
5969                 rc = -ENODEV;
5970                 goto test_nvram_done;
5971         }
5972
5973         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5974                 goto test_nvram_done;
5975
5976         csum = ether_crc_le(0x100, data);
5977         if (csum != CRC32_RESIDUAL) {
5978                 rc = -ENODEV;
5979                 goto test_nvram_done;
5980         }
5981
5982         csum = ether_crc_le(0x100, data + 0x100);
5983         if (csum != CRC32_RESIDUAL) {
5984                 rc = -ENODEV;
5985         }
5986
5987 test_nvram_done:
5988         return rc;
5989 }
5990
5991 static int
5992 bnx2_test_link(struct bnx2 *bp)
5993 {
5994         u32 bmsr;
5995
5996         if (!netif_running(bp->dev))
5997                 return -ENODEV;
5998
5999         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6000                 if (bp->link_up)
6001                         return 0;
6002                 return -ENODEV;
6003         }
6004         spin_lock_bh(&bp->phy_lock);
6005         bnx2_enable_bmsr1(bp);
6006         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6007         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6008         bnx2_disable_bmsr1(bp);
6009         spin_unlock_bh(&bp->phy_lock);
6010
6011         if (bmsr & BMSR_LSTATUS) {
6012                 return 0;
6013         }
6014         return -ENODEV;
6015 }
6016
6017 static int
6018 bnx2_test_intr(struct bnx2 *bp)
6019 {
6020         int i;
6021         u16 status_idx;
6022
6023         if (!netif_running(bp->dev))
6024                 return -ENODEV;
6025
6026         status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6027
6028         /* This register is not touched during run-time. */
6029         BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6030         BNX2_RD(bp, BNX2_HC_COMMAND);
6031
6032         for (i = 0; i < 10; i++) {
6033                 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6034                         status_idx) {
6035
6036                         break;
6037                 }
6038
6039                 msleep_interruptible(10);
6040         }
6041         if (i < 10)
6042                 return 0;
6043
6044         return -ENODEV;
6045 }
6046
6047 /* Determining link for parallel detection. */
6048 static int
6049 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6050 {
6051         u32 mode_ctl, an_dbg, exp;
6052
6053         if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6054                 return 0;
6055
6056         bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6057         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6058
6059         if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6060                 return 0;
6061
6062         bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6063         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6064         bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6065
6066         if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6067                 return 0;
6068
6069         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6070         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6071         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6072
6073         if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
6074                 return 0;
6075
6076         return 1;
6077 }
6078
6079 static void
6080 bnx2_5706_serdes_timer(struct bnx2 *bp)
6081 {
6082         int check_link = 1;
6083
6084         spin_lock(&bp->phy_lock);
6085         if (bp->serdes_an_pending) {
6086                 bp->serdes_an_pending--;
6087                 check_link = 0;
6088         } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6089                 u32 bmcr;
6090
6091                 bp->current_interval = BNX2_TIMER_INTERVAL;
6092
6093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6094
6095                 if (bmcr & BMCR_ANENABLE) {
6096                         if (bnx2_5706_serdes_has_link(bp)) {
6097                                 bmcr &= ~BMCR_ANENABLE;
6098                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6099                                 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6100                                 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6101                         }
6102                 }
6103         }
6104         else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6105                  (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6106                 u32 phy2;
6107
6108                 bnx2_write_phy(bp, 0x17, 0x0f01);
6109                 bnx2_read_phy(bp, 0x15, &phy2);
6110                 if (phy2 & 0x20) {
6111                         u32 bmcr;
6112
6113                         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6114                         bmcr |= BMCR_ANENABLE;
6115                         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6116
6117                         bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6118                 }
6119         } else
6120                 bp->current_interval = BNX2_TIMER_INTERVAL;
6121
6122         if (check_link) {
6123                 u32 val;
6124
6125                 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6126                 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6127                 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6128
6129                 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6130                         if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6131                                 bnx2_5706s_force_link_dn(bp, 1);
6132                                 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6133                         } else
6134                                 bnx2_set_link(bp);
6135                 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6136                         bnx2_set_link(bp);
6137         }
6138         spin_unlock(&bp->phy_lock);
6139 }
6140
6141 static void
6142 bnx2_5708_serdes_timer(struct bnx2 *bp)
6143 {
6144         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6145                 return;
6146
6147         if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6148                 bp->serdes_an_pending = 0;
6149                 return;
6150         }
6151
6152         spin_lock(&bp->phy_lock);
6153         if (bp->serdes_an_pending)
6154                 bp->serdes_an_pending--;
6155         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6156                 u32 bmcr;
6157
6158                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6159                 if (bmcr & BMCR_ANENABLE) {
6160                         bnx2_enable_forced_2g5(bp);
6161                         bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6162                 } else {
6163                         bnx2_disable_forced_2g5(bp);
6164                         bp->serdes_an_pending = 2;
6165                         bp->current_interval = BNX2_TIMER_INTERVAL;
6166                 }
6167
6168         } else
6169                 bp->current_interval = BNX2_TIMER_INTERVAL;
6170
6171         spin_unlock(&bp->phy_lock);
6172 }
6173
6174 static void
6175 bnx2_timer(struct timer_list *t)
6176 {
6177         struct bnx2 *bp = from_timer(bp, t, timer);
6178
6179         if (!netif_running(bp->dev))
6180                 return;
6181
6182         if (atomic_read(&bp->intr_sem) != 0)
6183                 goto bnx2_restart_timer;
6184
6185         if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6186              BNX2_FLAG_USING_MSI)
6187                 bnx2_chk_missed_msi(bp);
6188
6189         bnx2_send_heart_beat(bp);
6190
6191         bp->stats_blk->stat_FwRxDrop =
6192                 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6193
6194         /* workaround occasional corrupted counters */
6195         if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6196                 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6197                         BNX2_HC_COMMAND_STATS_NOW);
6198
6199         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6200                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6201                         bnx2_5706_serdes_timer(bp);
6202                 else
6203                         bnx2_5708_serdes_timer(bp);
6204         }
6205
6206 bnx2_restart_timer:
6207         mod_timer(&bp->timer, jiffies + bp->current_interval);
6208 }
6209
6210 static int
6211 bnx2_request_irq(struct bnx2 *bp)
6212 {
6213         unsigned long flags;
6214         struct bnx2_irq *irq;
6215         int rc = 0, i;
6216
6217         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6218                 flags = 0;
6219         else
6220                 flags = IRQF_SHARED;
6221
6222         for (i = 0; i < bp->irq_nvecs; i++) {
6223                 irq = &bp->irq_tbl[i];
6224                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6225                                  &bp->bnx2_napi[i]);
6226                 if (rc)
6227                         break;
6228                 irq->requested = 1;
6229         }
6230         return rc;
6231 }
6232
6233 static void
6234 __bnx2_free_irq(struct bnx2 *bp)
6235 {
6236         struct bnx2_irq *irq;
6237         int i;
6238
6239         for (i = 0; i < bp->irq_nvecs; i++) {
6240                 irq = &bp->irq_tbl[i];
6241                 if (irq->requested)
6242                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6243                 irq->requested = 0;
6244         }
6245 }
6246
6247 static void
6248 bnx2_free_irq(struct bnx2 *bp)
6249 {
6250
6251         __bnx2_free_irq(bp);
6252         if (bp->flags & BNX2_FLAG_USING_MSI)
6253                 pci_disable_msi(bp->pdev);
6254         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6255                 pci_disable_msix(bp->pdev);
6256
6257         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6258 }
6259
6260 static void
6261 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6262 {
6263         int i, total_vecs;
6264         struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6265         struct net_device *dev = bp->dev;
6266         const int len = sizeof(bp->irq_tbl[0].name);
6267
6268         bnx2_setup_msix_tbl(bp);
6269         BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6270         BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6271         BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6272
6273         /*  Need to flush the previous three writes to ensure MSI-X
6274          *  is setup properly */
6275         BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6276
6277         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6278                 msix_ent[i].entry = i;
6279                 msix_ent[i].vector = 0;
6280         }
6281
6282         total_vecs = msix_vecs;
6283 #ifdef BCM_CNIC
6284         total_vecs++;
6285 #endif
6286         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6287                                            BNX2_MIN_MSIX_VEC, total_vecs);
6288         if (total_vecs < 0)
6289                 return;
6290
6291         msix_vecs = total_vecs;
6292 #ifdef BCM_CNIC
6293         msix_vecs--;
6294 #endif
6295         bp->irq_nvecs = msix_vecs;
6296         bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6297         for (i = 0; i < total_vecs; i++) {
6298                 bp->irq_tbl[i].vector = msix_ent[i].vector;
6299                 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6300                 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6301         }
6302 }
6303
6304 static int
6305 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6306 {
6307         int cpus = netif_get_num_default_rss_queues();
6308         int msix_vecs;
6309
6310         if (!bp->num_req_rx_rings)
6311                 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6312         else if (!bp->num_req_tx_rings)
6313                 msix_vecs = max(cpus, bp->num_req_rx_rings);
6314         else
6315                 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6316
6317         msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6318
6319         bp->irq_tbl[0].handler = bnx2_interrupt;
6320         strcpy(bp->irq_tbl[0].name, bp->dev->name);
6321         bp->irq_nvecs = 1;
6322         bp->irq_tbl[0].vector = bp->pdev->irq;
6323
6324         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6325                 bnx2_enable_msix(bp, msix_vecs);
6326
6327         if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6328             !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6329                 if (pci_enable_msi(bp->pdev) == 0) {
6330                         bp->flags |= BNX2_FLAG_USING_MSI;
6331                         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6332                                 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6333                                 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6334                         } else
6335                                 bp->irq_tbl[0].handler = bnx2_msi;
6336
6337                         bp->irq_tbl[0].vector = bp->pdev->irq;
6338                 }
6339         }
6340
6341         if (!bp->num_req_tx_rings)
6342                 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6343         else
6344                 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6345
6346         if (!bp->num_req_rx_rings)
6347                 bp->num_rx_rings = bp->irq_nvecs;
6348         else
6349                 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6350
6351         netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6352
6353         return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6354 }
6355
6356 /* Called with rtnl_lock */
6357 static int
6358 bnx2_open(struct net_device *dev)
6359 {
6360         struct bnx2 *bp = netdev_priv(dev);
6361         int rc;
6362
6363         rc = bnx2_request_firmware(bp);
6364         if (rc < 0)
6365                 goto out;
6366
6367         netif_carrier_off(dev);
6368
6369         bnx2_disable_int(bp);
6370
6371         rc = bnx2_setup_int_mode(bp, disable_msi);
6372         if (rc)
6373                 goto open_err;
6374         bnx2_init_napi(bp);
6375         bnx2_napi_enable(bp);
6376         rc = bnx2_alloc_mem(bp);
6377         if (rc)
6378                 goto open_err;
6379
6380         rc = bnx2_request_irq(bp);
6381         if (rc)
6382                 goto open_err;
6383
6384         rc = bnx2_init_nic(bp, 1);
6385         if (rc)
6386                 goto open_err;
6387
6388         mod_timer(&bp->timer, jiffies + bp->current_interval);
6389
6390         atomic_set(&bp->intr_sem, 0);
6391
6392         memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6393
6394         bnx2_enable_int(bp);
6395
6396         if (bp->flags & BNX2_FLAG_USING_MSI) {
6397                 /* Test MSI to make sure it is working
6398                  * If MSI test fails, go back to INTx mode
6399                  */
6400                 if (bnx2_test_intr(bp) != 0) {
6401                         netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6402
6403                         bnx2_disable_int(bp);
6404                         bnx2_free_irq(bp);
6405
6406                         bnx2_setup_int_mode(bp, 1);
6407
6408                         rc = bnx2_init_nic(bp, 0);
6409
6410                         if (!rc)
6411                                 rc = bnx2_request_irq(bp);
6412
6413                         if (rc) {
6414                                 del_timer_sync(&bp->timer);
6415                                 goto open_err;
6416                         }
6417                         bnx2_enable_int(bp);
6418                 }
6419         }
6420         if (bp->flags & BNX2_FLAG_USING_MSI)
6421                 netdev_info(dev, "using MSI\n");
6422         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6423                 netdev_info(dev, "using MSIX\n");
6424
6425         netif_tx_start_all_queues(dev);
6426 out:
6427         return rc;
6428
6429 open_err:
6430         bnx2_napi_disable(bp);
6431         bnx2_free_skbs(bp);
6432         bnx2_free_irq(bp);
6433         bnx2_free_mem(bp);
6434         bnx2_del_napi(bp);
6435         bnx2_release_firmware(bp);
6436         goto out;
6437 }
6438
6439 static void
6440 bnx2_reset_task(struct work_struct *work)
6441 {
6442         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6443         int rc;
6444         u16 pcicmd;
6445
6446         rtnl_lock();
6447         if (!netif_running(bp->dev)) {
6448                 rtnl_unlock();
6449                 return;
6450         }
6451
6452         bnx2_netif_stop(bp, true);
6453
6454         pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6455         if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6456                 /* in case PCI block has reset */
6457                 pci_restore_state(bp->pdev);
6458                 pci_save_state(bp->pdev);
6459         }
6460         rc = bnx2_init_nic(bp, 1);
6461         if (rc) {
6462                 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6463                 bnx2_napi_enable(bp);
6464                 dev_close(bp->dev);
6465                 rtnl_unlock();
6466                 return;
6467         }
6468
6469         atomic_set(&bp->intr_sem, 1);
6470         bnx2_netif_start(bp, true);
6471         rtnl_unlock();
6472 }
6473
6474 #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6475
6476 static void
6477 bnx2_dump_ftq(struct bnx2 *bp)
6478 {
6479         int i;
6480         u32 reg, bdidx, cid, valid;
6481         struct net_device *dev = bp->dev;
6482         static const struct ftq_reg {
6483                 char *name;
6484                 u32 off;
6485         } ftq_arr[] = {
6486                 BNX2_FTQ_ENTRY(RV2P_P),
6487                 BNX2_FTQ_ENTRY(RV2P_T),
6488                 BNX2_FTQ_ENTRY(RV2P_M),
6489                 BNX2_FTQ_ENTRY(TBDR_),
6490                 BNX2_FTQ_ENTRY(TDMA_),
6491                 BNX2_FTQ_ENTRY(TXP_),
6492                 BNX2_FTQ_ENTRY(TXP_),
6493                 BNX2_FTQ_ENTRY(TPAT_),
6494                 BNX2_FTQ_ENTRY(RXP_C),
6495                 BNX2_FTQ_ENTRY(RXP_),
6496                 BNX2_FTQ_ENTRY(COM_COMXQ_),
6497                 BNX2_FTQ_ENTRY(COM_COMTQ_),
6498                 BNX2_FTQ_ENTRY(COM_COMQ_),
6499                 BNX2_FTQ_ENTRY(CP_CPQ_),
6500         };
6501
6502         netdev_err(dev, "<--- start FTQ dump --->\n");
6503         for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6504                 netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6505                            bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6506
6507         netdev_err(dev, "CPU states:\n");
6508         for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6509                 netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6510                            reg, bnx2_reg_rd_ind(bp, reg),
6511                            bnx2_reg_rd_ind(bp, reg + 4),
6512                            bnx2_reg_rd_ind(bp, reg + 8),
6513                            bnx2_reg_rd_ind(bp, reg + 0x1c),
6514                            bnx2_reg_rd_ind(bp, reg + 0x1c),
6515                            bnx2_reg_rd_ind(bp, reg + 0x20));
6516
6517         netdev_err(dev, "<--- end FTQ dump --->\n");
6518         netdev_err(dev, "<--- start TBDC dump --->\n");
6519         netdev_err(dev, "TBDC free cnt: %ld\n",
6520                    BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6521         netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6522         for (i = 0; i < 0x20; i++) {
6523                 int j = 0;
6524
6525                 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6526                 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6527                         BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6528                 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6529                 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6530                         BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6531                         j++;
6532
6533                 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6534                 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6535                 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6536                 netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6537                            i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6538                            bdidx >> 24, (valid >> 8) & 0x0ff);
6539         }
6540         netdev_err(dev, "<--- end TBDC dump --->\n");
6541 }
6542
6543 static void
6544 bnx2_dump_state(struct bnx2 *bp)
6545 {
6546         struct net_device *dev = bp->dev;
6547         u32 val1, val2;
6548
6549         pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6550         netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6551                    atomic_read(&bp->intr_sem), val1);
6552         pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6553         pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6554         netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6555         netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6556                    BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6557                    BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6558         netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6559                    BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6560         netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6561                    BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6562         if (bp->flags & BNX2_FLAG_USING_MSIX)
6563                 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6564                            BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6565 }
6566
6567 static void
6568 bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6569 {
6570         struct bnx2 *bp = netdev_priv(dev);
6571
6572         bnx2_dump_ftq(bp);
6573         bnx2_dump_state(bp);
6574         bnx2_dump_mcp_state(bp);
6575
6576         /* This allows the netif to be shutdown gracefully before resetting */
6577         schedule_work(&bp->reset_task);
6578 }
6579
6580 /* Called with netif_tx_lock.
6581  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6582  * netif_wake_queue().
6583  */
6584 static netdev_tx_t
6585 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6586 {
6587         struct bnx2 *bp = netdev_priv(dev);
6588         dma_addr_t mapping;
6589         struct bnx2_tx_bd *txbd;
6590         struct bnx2_sw_tx_bd *tx_buf;
6591         u32 len, vlan_tag_flags, last_frag, mss;
6592         u16 prod, ring_prod;
6593         int i;
6594         struct bnx2_napi *bnapi;
6595         struct bnx2_tx_ring_info *txr;
6596         struct netdev_queue *txq;
6597
6598         /*  Determine which tx ring we will be placed on */
6599         i = skb_get_queue_mapping(skb);
6600         bnapi = &bp->bnx2_napi[i];
6601         txr = &bnapi->tx_ring;
6602         txq = netdev_get_tx_queue(dev, i);
6603
6604         if (unlikely(bnx2_tx_avail(bp, txr) <
6605             (skb_shinfo(skb)->nr_frags + 1))) {
6606                 netif_tx_stop_queue(txq);
6607                 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6608
6609                 return NETDEV_TX_BUSY;
6610         }
6611         len = skb_headlen(skb);
6612         prod = txr->tx_prod;
6613         ring_prod = BNX2_TX_RING_IDX(prod);
6614
6615         vlan_tag_flags = 0;
6616         if (skb->ip_summed == CHECKSUM_PARTIAL) {
6617                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6618         }
6619
6620         if (skb_vlan_tag_present(skb)) {
6621                 vlan_tag_flags |=
6622                         (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6623         }
6624
6625         if ((mss = skb_shinfo(skb)->gso_size)) {
6626                 u32 tcp_opt_len;
6627                 struct iphdr *iph;
6628
6629                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6630
6631                 tcp_opt_len = tcp_optlen(skb);
6632
6633                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6634                         u32 tcp_off = skb_transport_offset(skb) -
6635                                       sizeof(struct ipv6hdr) - ETH_HLEN;
6636
6637                         vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6638                                           TX_BD_FLAGS_SW_FLAGS;
6639                         if (likely(tcp_off == 0))
6640                                 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6641                         else {
6642                                 tcp_off >>= 3;
6643                                 vlan_tag_flags |= ((tcp_off & 0x3) <<
6644                                                    TX_BD_FLAGS_TCP6_OFF0_SHL) |
6645                                                   ((tcp_off & 0x10) <<
6646                                                    TX_BD_FLAGS_TCP6_OFF4_SHL);
6647                                 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6648                         }
6649                 } else {
6650                         iph = ip_hdr(skb);
6651                         if (tcp_opt_len || (iph->ihl > 5)) {
6652                                 vlan_tag_flags |= ((iph->ihl - 5) +
6653                                                    (tcp_opt_len >> 2)) << 8;
6654                         }
6655                 }
6656         } else
6657                 mss = 0;
6658
6659         mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6660         if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6661                 dev_kfree_skb_any(skb);
6662                 return NETDEV_TX_OK;
6663         }
6664
6665         tx_buf = &txr->tx_buf_ring[ring_prod];
6666         tx_buf->skb = skb;
6667         dma_unmap_addr_set(tx_buf, mapping, mapping);
6668
6669         txbd = &txr->tx_desc_ring[ring_prod];
6670
6671         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6672         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6673         txbd->tx_bd_mss_nbytes = len | (mss << 16);
6674         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6675
6676         last_frag = skb_shinfo(skb)->nr_frags;
6677         tx_buf->nr_frags = last_frag;
6678         tx_buf->is_gso = skb_is_gso(skb);
6679
6680         for (i = 0; i < last_frag; i++) {
6681                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6682
6683                 prod = BNX2_NEXT_TX_BD(prod);
6684                 ring_prod = BNX2_TX_RING_IDX(prod);
6685                 txbd = &txr->tx_desc_ring[ring_prod];
6686
6687                 len = skb_frag_size(frag);
6688                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6689                                            DMA_TO_DEVICE);
6690                 if (dma_mapping_error(&bp->pdev->dev, mapping))
6691                         goto dma_error;
6692                 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6693                                    mapping);
6694
6695                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6696                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6697                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6698                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6699
6700         }
6701         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6702
6703         /* Sync BD data before updating TX mailbox */
6704         wmb();
6705
6706         netdev_tx_sent_queue(txq, skb->len);
6707
6708         prod = BNX2_NEXT_TX_BD(prod);
6709         txr->tx_prod_bseq += skb->len;
6710
6711         BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6712         BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6713
6714         txr->tx_prod = prod;
6715
6716         if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6717                 netif_tx_stop_queue(txq);
6718
6719                 /* netif_tx_stop_queue() must be done before checking
6720                  * tx index in bnx2_tx_avail() below, because in
6721                  * bnx2_tx_int(), we update tx index before checking for
6722                  * netif_tx_queue_stopped().
6723                  */
6724                 smp_mb();
6725                 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6726                         netif_tx_wake_queue(txq);
6727         }
6728
6729         return NETDEV_TX_OK;
6730 dma_error:
6731         /* save value of frag that failed */
6732         last_frag = i;
6733
6734         /* start back at beginning and unmap skb */
6735         prod = txr->tx_prod;
6736         ring_prod = BNX2_TX_RING_IDX(prod);
6737         tx_buf = &txr->tx_buf_ring[ring_prod];
6738         tx_buf->skb = NULL;
6739         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6740                          skb_headlen(skb), PCI_DMA_TODEVICE);
6741
6742         /* unmap remaining mapped pages */
6743         for (i = 0; i < last_frag; i++) {
6744                 prod = BNX2_NEXT_TX_BD(prod);
6745                 ring_prod = BNX2_TX_RING_IDX(prod);
6746                 tx_buf = &txr->tx_buf_ring[ring_prod];
6747                 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6748                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
6749                                PCI_DMA_TODEVICE);
6750         }
6751
6752         dev_kfree_skb_any(skb);
6753         return NETDEV_TX_OK;
6754 }
6755
/* Called with rtnl_lock */
/* net_device_ops .ndo_stop: quiesce and shut down the chip, then release
 * all driver resources (IRQs, skbs, DMA memory, NAPI contexts).
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stop interrupts, NAPI polling and the TX path before touching
	 * the hardware, so nothing races with the teardown below.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	netif_tx_disable(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	return 0;
}
6775
6776 static void
6777 bnx2_save_stats(struct bnx2 *bp)
6778 {
6779         u32 *hw_stats = (u32 *) bp->stats_blk;
6780         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6781         int i;
6782
6783         /* The 1st 10 counters are 64-bit counters */
6784         for (i = 0; i < 20; i += 2) {
6785                 u32 hi;
6786                 u64 lo;
6787
6788                 hi = temp_stats[i] + hw_stats[i];
6789                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6790                 if (lo > 0xffffffff)
6791                         hi++;
6792                 temp_stats[i] = hi;
6793                 temp_stats[i + 1] = lo & 0xffffffff;
6794         }
6795
6796         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6797                 temp_stats[i] += hw_stats[i];
6798 }
6799
/* Reassemble a 64-bit counter from its _hi/_lo u32 halves. */
#define GET_64BIT_NET_STATS64(ctr)		\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

/* Sum a 64-bit counter across the live hardware stats block and the
 * software copy saved before the last chip reset.
 */
#define GET_64BIT_NET_STATS(ctr)				\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

/* Same as GET_64BIT_NET_STATS, for a plain 32-bit counter. */
#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
6810
/* net_device_ops .ndo_get_stats64: map the chip's statistics block (plus
 * the software copy saved across resets by bnx2_save_stats()) onto the
 * standard netdev counters.
 */
static void
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Stats memory may not exist yet if the device never came up. */
	if (!bp->stats_blk)
		return;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	/* rx_errors is the sum of the individual rx error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* NOTE(review): the carrier-sense counter is apparently unreliable
	 * on 5706 and 5708 A0 silicon, so report 0 there — confirm against
	 * the chip errata.
	 */
	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

}
6882
6883 /* All ethtool functions called with rtnl_lock */
6884
/* ethtool get_link_ksettings: report supported/advertised link modes,
 * media port, and the current speed/duplex.  Called with rtnl_lock held
 * (see the comment above this ethtool section).
 */
static int
bnx2_get_link_ksettings(struct net_device *dev,
			struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;
	u32 supported, advertising;

	supported = SUPPORTED_Autoneg;
	/* A remote-PHY capable device can use either media type. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	/* phy_lock protects the link-state fields read below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->base.port = bp->phy_port;
	advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		cmd->base.autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = bp->line_speed;
		cmd->base.duplex = bp->duplex;
		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->base.eth_tp_mdix = ETH_TP_MDI;
		}
	}
	else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->base.phy_address = bp->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
6952
/* ethtool set_link_ksettings: validate and store the requested link
 * configuration (autoneg, speed, duplex, port) and, if the device is up,
 * reprogram the PHY.  Returns 0 or -EINVAL for invalid combinations.
 */
static int
bnx2_set_link_ksettings(struct net_device *dev,
			const struct ethtool_link_ksettings *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed until validation
	 * has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with remote-PHY capability. */
	if (cmd->base.port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);

		/* Restrict advertising to the modes valid for the port;
		 * fall back to all of them if nothing valid was requested.
		 */
		if (cmd->base.port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		u32 speed = cmd->base.speed;

		if (cmd->base.port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex, and
			 * 2.5G only on capable PHYs.
			 */
			if ((speed != SPEED_1000 &&
			     speed != SPEED_2500) ||
			    (cmd->base.duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		} else if (speed == SPEED_1000 || speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = speed;
		req_duplex = cmd->base.duplex;
		advertising = 0;
	}

	/* Validation passed: commit the new settings. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->base.port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
7034
/* ethtool get_drvinfo: report driver name, PCI bus address and the
 * bootcode/firmware version string.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
7044
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: the register dump has a fixed size. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
7052
/* ethtool get_regs: dump the chip's readable register ranges into the
 * caller's BNX2_REGDUMP_LEN-byte buffer; unreadable gaps stay zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Pairs of (start, end) byte offsets delimiting the register
	 * ranges that are safe to read; the space between one range's end
	 * and the next range's start is skipped.
	 */
	static const u32 reg_boundaries[] = {
		0x0000, 0x0098, 0x0400, 0x045c,
		0x0800, 0x0880, 0x0c00, 0x0c10,
		0x0c30, 0x0d08, 0x1000, 0x101c,
		0x1040, 0x1048, 0x1080, 0x10a4,
		0x1400, 0x1490, 0x1498, 0x14f0,
		0x1500, 0x155c, 0x1580, 0x15dc,
		0x1600, 0x1658, 0x1680, 0x16d8,
		0x1800, 0x1820, 0x1840, 0x1854,
		0x1880, 0x1894, 0x1900, 0x1984,
		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
		0x1c80, 0x1c94, 0x1d00, 0x1d84,
		0x2000, 0x2030, 0x23c0, 0x2400,
		0x2800, 0x2820, 0x2830, 0x2850,
		0x2b40, 0x2c10, 0x2fc0, 0x3058,
		0x3c00, 0x3c94, 0x4000, 0x4010,
		0x4080, 0x4090, 0x43c0, 0x4458,
		0x4c00, 0x4c18, 0x4c40, 0x4c54,
		0x4fc0, 0x5010, 0x53c0, 0x5444,
		0x5c00, 0x5c18, 0x5c80, 0x5c90,
		0x5fc0, 0x6000, 0x6400, 0x6428,
		0x6800, 0x6848, 0x684c, 0x6860,
		0x6888, 0x6910, 0x8000
	};

	regs->version = 0;

	/* Zero the whole buffer so skipped ranges read back as 0. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;	/* reg_boundaries[0] is 0, so p is unchanged here */
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = BNX2_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of this range: jump to the next one. */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
7104
7105 static void
7106 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7107 {
7108         struct bnx2 *bp = netdev_priv(dev);
7109
7110         if (bp->flags & BNX2_FLAG_NO_WOL) {
7111                 wol->supported = 0;
7112                 wol->wolopts = 0;
7113         }
7114         else {
7115                 wol->supported = WAKE_MAGIC;
7116                 if (bp->wol)
7117                         wol->wolopts = WAKE_MAGIC;
7118                 else
7119                         wol->wolopts = 0;
7120         }
7121         memset(&wol->sopass, 0, sizeof(wol->sopass));
7122 }
7123
7124 static int
7125 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7126 {
7127         struct bnx2 *bp = netdev_priv(dev);
7128
7129         if (wol->wolopts & ~WAKE_MAGIC)
7130                 return -EINVAL;
7131
7132         if (wol->wolopts & WAKE_MAGIC) {
7133                 if (bp->flags & BNX2_FLAG_NO_WOL)
7134                         return -EINVAL;
7135
7136                 bp->wol = 1;
7137         }
7138         else {
7139                 bp->wol = 0;
7140         }
7141
7142         device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7143
7144         return 0;
7145 }
7146
/* ethtool nway_reset: restart link autonegotiation.  Only valid while
 * the device is up (-EAGAIN otherwise) and autoneg is enabled (-EINVAL).
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices renegotiate through the management firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; it is retaken below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handling in the timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
7192
/* ethtool get_link: report the cached link state (1 = up, 0 = down). */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
7200
7201 static int
7202 bnx2_get_eeprom_len(struct net_device *dev)
7203 {
7204         struct bnx2 *bp = netdev_priv(dev);
7205
7206         if (!bp->flash_info)
7207                 return 0;
7208
7209         return (int) bp->flash_size;
7210 }
7211
7212 static int
7213 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7214                 u8 *eebuf)
7215 {
7216         struct bnx2 *bp = netdev_priv(dev);
7217         int rc;
7218
7219         /* parameters already validated in ethtool_get_eeprom */
7220
7221         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7222
7223         return rc;
7224 }
7225
7226 static int
7227 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7228                 u8 *eebuf)
7229 {
7230         struct bnx2 *bp = netdev_priv(dev);
7231         int rc;
7232
7233         /* parameters already validated in ethtool_set_eeprom */
7234
7235         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7236
7237         return rc;
7238 }
7239
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick timers and quick-consumer-index trip counts).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero the fields this driver does not support. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7261
7262 static int
7263 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7264 {
7265         struct bnx2 *bp = netdev_priv(dev);
7266
7267         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7268         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7269
7270         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7271         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7272
7273         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7274         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7275
7276         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7277         if (bp->rx_quick_cons_trip_int > 0xff)
7278                 bp->rx_quick_cons_trip_int = 0xff;
7279
7280         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7281         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7282
7283         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7284         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7285
7286         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7287         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7288
7289         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7290         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7291                 0xff;
7292
7293         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7294         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7295                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7296                         bp->stats_ticks = USEC_PER_SEC;
7297         }
7298         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7299                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7300         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7301
7302         if (netif_running(bp->dev)) {
7303                 bnx2_netif_stop(bp, true);
7304                 bnx2_init_nic(bp, 0);
7305                 bnx2_netif_start(bp, true);
7306         }
7307
7308         return 0;
7309 }
7310
/* ethtool get_ringparam: report current and maximum rx/tx ring sizes
 * (the "jumbo" ring is the rx page ring used for large MTUs).
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
7325
/* Apply new rx/tx ring sizes.  If the interface is up, the chip is
 * reset (hardware statistics are saved first, since the reset clears
 * them), ring memory is reallocated and the NIC is restarted.  With
 * @reset_irq the interrupt mode and NAPI contexts are torn down and
 * rebuilt as well.  Returns 0 or a negative errno; on failure the
 * device is closed.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		if (reset_irq) {
			bnx2_free_irq(bp);
			bnx2_del_napi(bp);
		} else {
			__bnx2_free_irq(bp);
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	/* Record the new sizes; they take effect on the next init. */
	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc = 0;

		if (reset_irq) {
			rc = bnx2_setup_int_mode(bp, disable_msi);
			bnx2_init_napi(bp);
		}

		if (!rc)
			rc = bnx2_alloc_mem(bp);

		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NAPI must be enabled for dev_close() to work. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7381
7382 static int
7383 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7384 {
7385         struct bnx2 *bp = netdev_priv(dev);
7386         int rc;
7387
7388         if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7389                 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7390                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7391
7392                 return -EINVAL;
7393         }
7394         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7395                                    false);
7396         return rc;
7397 }
7398
7399 static void
7400 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7401 {
7402         struct bnx2 *bp = netdev_priv(dev);
7403
7404         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7405         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7406         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7407 }
7408
7409 static int
7410 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7411 {
7412         struct bnx2 *bp = netdev_priv(dev);
7413
7414         bp->req_flow_ctrl = 0;
7415         if (epause->rx_pause)
7416                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7417         if (epause->tx_pause)
7418                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7419
7420         if (epause->autoneg) {
7421                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7422         }
7423         else {
7424                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7425         }
7426
7427         if (netif_running(dev)) {
7428                 spin_lock_bh(&bp->phy_lock);
7429                 bnx2_setup_phy(bp, bp->phy_port);
7430                 spin_unlock_bh(&bp->phy_lock);
7431         }
7432
7433         return 0;
7434 }
7435
/* ethtool statistics names reported for ETH_SS_STATS.  The order must
 * match bnx2_stats_offset_arr and the per-chip stats length tables.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7487
/* Number of ethtool statistics entries. */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a statistics_block field, expressed in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset into the hardware statistics block for each counter, in
 * the same order as bnx2_stats_str_arr.  For 8-byte counters this is
 * the offset of the _hi (upper 32-bit) word.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7541
/* Per-counter width in bytes (8, 4, or 0 = not supported) for 5706
 * A0-A2 and 5708 A0 chips.  stat_IfHCInBadOctets and
 * stat_Dot3StatsCarrierSenseErrors are skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7552
/* Per-counter width in bytes for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7560
/* Number of ethtool self-test entries. */
#define BNX2_NUM_TESTS 6

/* Self-test names reported for ETH_SS_TEST; the order matches the
 * buf[] result indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7573
7574 static int
7575 bnx2_get_sset_count(struct net_device *dev, int sset)
7576 {
7577         switch (sset) {
7578         case ETH_SS_TEST:
7579                 return BNX2_NUM_TESTS;
7580         case ETH_SS_STATS:
7581                 return BNX2_NUM_STATS;
7582         default:
7583                 return -EOPNOTSUPP;
7584         }
7585 }
7586
/* ethtool self-test handler.  buf[0..5] receive the per-test results
 * (non-zero = failed) in the order of bnx2_tests_str_arr.  Offline
 * tests (register, memory, loopback) require resetting the chip into
 * diagnostic mode; online tests run without disturbing traffic.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and put the chip in diag mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is stored directly as the test value. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the netdev
		 * was closed while we were testing).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up, polling once a second for up to 7s */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	/* Online tests: safe to run while the interface is up. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
7641
7642 static void
7643 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7644 {
7645         switch (stringset) {
7646         case ETH_SS_STATS:
7647                 memcpy(buf, bnx2_stats_str_arr,
7648                         sizeof(bnx2_stats_str_arr));
7649                 break;
7650         case ETH_SS_TEST:
7651                 memcpy(buf, bnx2_tests_str_arr,
7652                         sizeof(bnx2_tests_str_arr));
7653                 break;
7654         }
7655 }
7656
/* ethtool get_ethtool_stats handler.  Reads each counter from the
 * DMA'd hardware statistics block and adds the saved values in
 * temp_stats_blk (accumulated across chip resets), widening 4-byte
 * counters and combining hi/lo words of 8-byte counters into u64s.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (!hw_stats) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip some counters due to errata. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset points at the hi word; lo word
		 * follows it.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7703
/* ethtool set_phys_id handler: blink the port LED so an operator can
 * identify the NIC.  The core calls ACTIVE once, then alternates
 * ON/OFF at the interval we return, and finishes with INACTIVE.
 */
static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save LED config so INACTIVE can restore it. */
		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Force all speed/traffic LEDs on via override bits. */
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
			BNX2_EMAC_LED_1000MB_OVERRIDE |
			BNX2_EMAC_LED_100MB_OVERRIDE |
			BNX2_EMAC_LED_10MB_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
			BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Release the override and restore the saved config. */
		BNX2_WR(bp, BNX2_EMAC_LED, 0);
		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
		break;
	}

	return 0;
}
7736
/* ndo_set_features handler: keep vlan_features' TSO bits in sync with
 * VLAN tx offload, and reprogram the rx VLAN-stripping mode (which
 * requires a brief netif stop and a firmware notification).  Returns
 * 1 when features were applied here so the core skips its own update.
 */
static int
bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	/* Only reprogram the chip when the rx VLAN-strip setting
	 * actually changed and the interface is up.
	 */
	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		/* Tell the bootcode about the new VLAN tag handling. */
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}
7761
7762 static void bnx2_get_channels(struct net_device *dev,
7763                               struct ethtool_channels *channels)
7764 {
7765         struct bnx2 *bp = netdev_priv(dev);
7766         u32 max_rx_rings = 1;
7767         u32 max_tx_rings = 1;
7768
7769         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7770                 max_rx_rings = RX_MAX_RINGS;
7771                 max_tx_rings = TX_MAX_RINGS;
7772         }
7773
7774         channels->max_rx = max_rx_rings;
7775         channels->max_tx = max_tx_rings;
7776         channels->max_other = 0;
7777         channels->max_combined = 0;
7778         channels->rx_count = bp->num_rx_rings;
7779         channels->tx_count = bp->num_tx_rings;
7780         channels->other_count = 0;
7781         channels->combined_count = 0;
7782 }
7783
7784 static int bnx2_set_channels(struct net_device *dev,
7785                               struct ethtool_channels *channels)
7786 {
7787         struct bnx2 *bp = netdev_priv(dev);
7788         u32 max_rx_rings = 1;
7789         u32 max_tx_rings = 1;
7790         int rc = 0;
7791
7792         if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7793                 max_rx_rings = RX_MAX_RINGS;
7794                 max_tx_rings = TX_MAX_RINGS;
7795         }
7796         if (channels->rx_count > max_rx_rings ||
7797             channels->tx_count > max_tx_rings)
7798                 return -EINVAL;
7799
7800         bp->num_req_rx_rings = channels->rx_count;
7801         bp->num_req_tx_rings = channels->tx_count;
7802
7803         if (netif_running(dev))
7804                 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7805                                            bp->tx_ring_size, true);
7806
7807         return rc;
7808 }
7809
/* ethtool operations table.  supported_coalesce_params tells the
 * ethtool core which struct ethtool_coalesce fields this driver
 * honours so it can reject unsupported settings before calling us.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.get_channels		= bnx2_get_channels,
	.set_channels		= bnx2_set_channels,
	.get_link_ksettings	= bnx2_get_link_ksettings,
	.set_link_ksettings	= bnx2_set_link_ksettings,
};
7842
/* Called with rtnl_lock */
/* ndo_do_ioctl handler for MII ioctls: report the PHY address and
 * read/write PHY registers under phy_lock.  MII access is refused
 * when the PHY is managed remotely, and requires the device to be up.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		fallthrough;
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all MDIO register access. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7893
7894 /* Called with rtnl_lock */
7895 static int
7896 bnx2_change_mac_addr(struct net_device *dev, void *p)
7897 {
7898         struct sockaddr *addr = p;
7899         struct bnx2 *bp = netdev_priv(dev);
7900
7901         if (!is_valid_ether_addr(addr->sa_data))
7902                 return -EADDRNOTAVAIL;
7903
7904         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7905         if (netif_running(dev))
7906                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7907
7908         return 0;
7909 }
7910
/* Called with rtnl_lock */
/* ndo_change_mtu handler: record the new MTU and rebuild the rings so
 * the rx buffers match it.  Range checking is done by the core.
 */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
				     false);
}
7921
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll controller: service every IRQ vector by invoking its
 * handler directly with the interrupt disabled, so netconsole and
 * similar users can make progress without interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7938
/* Determine whether a 5709 port uses copper or SerDes media by
 * reading the dual-media control register: the bond ID identifies
 * single-media parts directly, otherwise the strap value (possibly
 * overridden by software) is decoded per PCI function.
 */
static void
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond ID C = copper-only part; S = SerDes-only part. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Dual-media part: use the software override strap if set,
	 * otherwise the hardware strap pins.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that select SerDes differ per function. */
	if (bp->func == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7976
/* Detect the PCI/PCI-X bus type, clock speed, and width from the
 * chip's misc status and clock control registers, recording the
 * results in bp->flags and bp->bus_speed_mhz.
 */
static void
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: decode the detected clock speed field. */
		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
8028
/* Read the VPD region from NVRAM and, if the board is a Dell OEM part
 * (manufacturer ID "1028"), append the vendor-specific version string
 * from the V0 keyword to bp->fw_version.  All failures are silent:
 * the function simply leaves fw_version unchanged.
 */
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half of the buffer holds the byte-swapped copy we
	 * parse; the second half receives the raw NVRAM read.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* NVRAM data is stored byte-swapped within each 32-bit word;
	 * undo the swap into the first half of the buffer.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD data block and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed for manufacturer ID "1028" (Dell OEM boards). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* Extract the vendor-specific version string (V0 keyword). */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
8096
8097 static int
8098 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8099 {
8100         struct bnx2 *bp;
8101         int rc, i, j;
8102         u32 reg;
8103         u64 dma_mask, persist_dma_mask;
8104         int err;
8105
8106         SET_NETDEV_DEV(dev, &pdev->dev);
8107         bp = netdev_priv(dev);
8108
8109         bp->flags = 0;
8110         bp->phy_flags = 0;
8111
8112         bp->temp_stats_blk =
8113                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8114
8115         if (!bp->temp_stats_blk) {
8116                 rc = -ENOMEM;
8117                 goto err_out;
8118         }
8119
8120         /* enable device (incl. PCI PM wakeup), and bus-mastering */
8121         rc = pci_enable_device(pdev);
8122         if (rc) {
8123                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8124                 goto err_out;
8125         }
8126
8127         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8128                 dev_err(&pdev->dev,
8129                         "Cannot find PCI device base address, aborting\n");
8130                 rc = -ENODEV;
8131                 goto err_out_disable;
8132         }
8133
8134         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8135         if (rc) {
8136                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8137                 goto err_out_disable;
8138         }
8139
8140         pci_set_master(pdev);
8141
8142         bp->pm_cap = pdev->pm_cap;
8143         if (bp->pm_cap == 0) {
8144                 dev_err(&pdev->dev,
8145                         "Cannot find power management capability, aborting\n");
8146                 rc = -EIO;
8147                 goto err_out_release;
8148         }
8149
8150         bp->dev = dev;
8151         bp->pdev = pdev;
8152
8153         spin_lock_init(&bp->phy_lock);
8154         spin_lock_init(&bp->indirect_lock);
8155 #ifdef BCM_CNIC
8156         mutex_init(&bp->cnic_lock);
8157 #endif
8158         INIT_WORK(&bp->reset_task, bnx2_reset_task);
8159
8160         bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8161                                                          TX_MAX_TSS_RINGS + 1));
8162         if (!bp->regview) {
8163                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8164                 rc = -ENOMEM;
8165                 goto err_out_release;
8166         }
8167
8168         /* Configure byte swap and enable write to the reg_window registers.
8169          * Rely on CPU to do target byte swapping on big endian systems
8170          * The chip's target access swapping will not swap all accesses
8171          */
8172         BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8173                 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8174                 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8175
8176         bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8177
8178         if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8179                 if (!pci_is_pcie(pdev)) {
8180                         dev_err(&pdev->dev, "Not PCIE, aborting\n");
8181                         rc = -EIO;
8182                         goto err_out_unmap;
8183                 }
8184                 bp->flags |= BNX2_FLAG_PCIE;
8185                 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8186                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8187
8188                 /* AER (Advanced Error Reporting) hooks */
8189                 err = pci_enable_pcie_error_reporting(pdev);
8190                 if (!err)
8191                         bp->flags |= BNX2_FLAG_AER_ENABLED;
8192
8193         } else {
8194                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8195                 if (bp->pcix_cap == 0) {
8196                         dev_err(&pdev->dev,
8197                                 "Cannot find PCIX capability, aborting\n");
8198                         rc = -EIO;
8199                         goto err_out_unmap;
8200                 }
8201                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8202         }
8203
8204         if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8205             BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8206                 if (pdev->msix_cap)
8207                         bp->flags |= BNX2_FLAG_MSIX_CAP;
8208         }
8209
8210         if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8211             BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8212                 if (pdev->msi_cap)
8213                         bp->flags |= BNX2_FLAG_MSI_CAP;
8214         }
8215
8216         /* 5708 cannot support DMA addresses > 40-bit.  */
8217         if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8218                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8219         else
8220                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8221
8222         /* Configure DMA attributes. */
8223         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8224                 dev->features |= NETIF_F_HIGHDMA;
8225                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8226                 if (rc) {
8227                         dev_err(&pdev->dev,
8228                                 "pci_set_consistent_dma_mask failed, aborting\n");
8229                         goto err_out_unmap;
8230                 }
8231         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8232                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8233                 goto err_out_unmap;
8234         }
8235
8236         if (!(bp->flags & BNX2_FLAG_PCIE))
8237                 bnx2_get_pci_speed(bp);
8238
8239         /* 5706A0 may falsely detect SERR and PERR. */
8240         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8241                 reg = BNX2_RD(bp, PCI_COMMAND);
8242                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8243                 BNX2_WR(bp, PCI_COMMAND, reg);
8244         } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8245                 !(bp->flags & BNX2_FLAG_PCIX)) {
8246                 dev_err(&pdev->dev,
8247                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8248                 rc = -EPERM;
8249                 goto err_out_unmap;
8250         }
8251
8252         bnx2_init_nvram(bp);
8253
8254         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8255
8256         if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8257                 bp->func = 1;
8258
8259         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8260             BNX2_SHM_HDR_SIGNATURE_SIG) {
8261                 u32 off = bp->func << 2;
8262
8263                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8264         } else
8265                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8266
8267         /* Get the permanent MAC address.  First we need to make sure the
8268          * firmware is actually running.
8269          */
8270         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8271
8272         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8273             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8274                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8275                 rc = -ENODEV;
8276                 goto err_out_unmap;
8277         }
8278
8279         bnx2_read_vpd_fw_ver(bp);
8280
8281         j = strlen(bp->fw_version);
8282         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8283         for (i = 0; i < 3 && j < 24; i++) {
8284                 u8 num, k, skip0;
8285
8286                 if (i == 0) {
8287                         bp->fw_version[j++] = 'b';
8288                         bp->fw_version[j++] = 'c';
8289                         bp->fw_version[j++] = ' ';
8290                 }
8291                 num = (u8) (reg >> (24 - (i * 8)));
8292                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8293                         if (num >= k || !skip0 || k == 1) {
8294                                 bp->fw_version[j++] = (num / k) + '0';
8295                                 skip0 = 0;
8296                         }
8297                 }
8298                 if (i != 2)
8299                         bp->fw_version[j++] = '.';
8300         }
8301         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8302         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8303                 bp->wol = 1;
8304
8305         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8306                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8307
8308                 for (i = 0; i < 30; i++) {
8309                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8310                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8311                                 break;
8312                         msleep(10);
8313                 }
8314         }
8315         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8316         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8317         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8318             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8319                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8320
8321                 if (j < 32)
8322                         bp->fw_version[j++] = ' ';
8323                 for (i = 0; i < 3 && j < 28; i++) {
8324                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8325                         reg = be32_to_cpu(reg);
8326                         memcpy(&bp->fw_version[j], &reg, 4);
8327                         j += 4;
8328                 }
8329         }
8330
8331         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8332         bp->mac_addr[0] = (u8) (reg >> 8);
8333         bp->mac_addr[1] = (u8) reg;
8334
8335         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8336         bp->mac_addr[2] = (u8) (reg >> 24);
8337         bp->mac_addr[3] = (u8) (reg >> 16);
8338         bp->mac_addr[4] = (u8) (reg >> 8);
8339         bp->mac_addr[5] = (u8) reg;
8340
8341         bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8342         bnx2_set_rx_ring_size(bp, 255);
8343
8344         bp->tx_quick_cons_trip_int = 2;
8345         bp->tx_quick_cons_trip = 20;
8346         bp->tx_ticks_int = 18;
8347         bp->tx_ticks = 80;
8348
8349         bp->rx_quick_cons_trip_int = 2;
8350         bp->rx_quick_cons_trip = 12;
8351         bp->rx_ticks_int = 18;
8352         bp->rx_ticks = 18;
8353
8354         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8355
8356         bp->current_interval = BNX2_TIMER_INTERVAL;
8357
8358         bp->phy_addr = 1;
8359
8360         /* allocate stats_blk */
8361         rc = bnx2_alloc_stats_blk(dev);
8362         if (rc)
8363                 goto err_out_unmap;
8364
8365         /* Disable WOL support if we are running on a SERDES chip. */
8366         if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8367                 bnx2_get_5709_media(bp);
8368         else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8369                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8370
8371         bp->phy_port = PORT_TP;
8372         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8373                 bp->phy_port = PORT_FIBRE;
8374                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8375                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8376                         bp->flags |= BNX2_FLAG_NO_WOL;
8377                         bp->wol = 0;
8378                 }
8379                 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8380                         /* Don't do parallel detect on this board because of
8381                          * some board problems.  The link will not go down
8382                          * if we do parallel detect.
8383                          */
8384                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8385                             pdev->subsystem_device == 0x310c)
8386                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8387                 } else {
8388                         bp->phy_addr = 2;
8389                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8390                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8391                 }
8392         } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8393                    BNX2_CHIP(bp) == BNX2_CHIP_5708)
8394                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8395         else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8396                  (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8397                   BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8398                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8399
8400         bnx2_init_fw_cap(bp);
8401
8402         if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8403             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8404             (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8405             !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8406                 bp->flags |= BNX2_FLAG_NO_WOL;
8407                 bp->wol = 0;
8408         }
8409
8410         if (bp->flags & BNX2_FLAG_NO_WOL)
8411                 device_set_wakeup_capable(&bp->pdev->dev, false);
8412         else
8413                 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8414
8415         if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8416                 bp->tx_quick_cons_trip_int =
8417                         bp->tx_quick_cons_trip;
8418                 bp->tx_ticks_int = bp->tx_ticks;
8419                 bp->rx_quick_cons_trip_int =
8420                         bp->rx_quick_cons_trip;
8421                 bp->rx_ticks_int = bp->rx_ticks;
8422                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8423                 bp->com_ticks_int = bp->com_ticks;
8424                 bp->cmd_ticks_int = bp->cmd_ticks;
8425         }
8426
8427         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8428          *
8429          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8430          * with byte enables disabled on the unused 32-bit word.  This is legal
8431          * but causes problems on the AMD 8132 which will eventually stop
8432          * responding after a while.
8433          *
8434          * AMD believes this incompatibility is unique to the 5706, and
8435          * prefers to locally disable MSI rather than globally disabling it.
8436          */
8437         if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8438                 struct pci_dev *amd_8132 = NULL;
8439
8440                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8441                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8442                                                   amd_8132))) {
8443
8444                         if (amd_8132->revision >= 0x10 &&
8445                             amd_8132->revision <= 0x13) {
8446                                 disable_msi = 1;
8447                                 pci_dev_put(amd_8132);
8448                                 break;
8449                         }
8450                 }
8451         }
8452
8453         bnx2_set_default_link(bp);
8454         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8455
8456         timer_setup(&bp->timer, bnx2_timer, 0);
8457         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8458
8459 #ifdef BCM_CNIC
8460         if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8461                 bp->cnic_eth_dev.max_iscsi_conn =
8462                         (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8463                          BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8464         bp->cnic_probe = bnx2_cnic_probe;
8465 #endif
8466         pci_save_state(pdev);
8467
8468         return 0;
8469
8470 err_out_unmap:
8471         if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8472                 pci_disable_pcie_error_reporting(pdev);
8473                 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8474         }
8475
8476         pci_iounmap(pdev, bp->regview);
8477         bp->regview = NULL;
8478
8479 err_out_release:
8480         pci_release_regions(pdev);
8481
8482 err_out_disable:
8483         pci_disable_device(pdev);
8484
8485 err_out:
8486         kfree(bp->temp_stats_blk);
8487
8488         return rc;
8489 }
8490
8491 static char *
8492 bnx2_bus_string(struct bnx2 *bp, char *str)
8493 {
8494         char *s = str;
8495
8496         if (bp->flags & BNX2_FLAG_PCIE) {
8497                 s += sprintf(s, "PCI Express");
8498         } else {
8499                 s += sprintf(s, "PCI");
8500                 if (bp->flags & BNX2_FLAG_PCIX)
8501                         s += sprintf(s, "-X");
8502                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8503                         s += sprintf(s, " 32-bit");
8504                 else
8505                         s += sprintf(s, " 64-bit");
8506                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8507         }
8508         return str;
8509 }
8510
8511 static void
8512 bnx2_del_napi(struct bnx2 *bp)
8513 {
8514         int i;
8515
8516         for (i = 0; i < bp->irq_nvecs; i++)
8517                 netif_napi_del(&bp->bnx2_napi[i].napi);
8518 }
8519
8520 static void
8521 bnx2_init_napi(struct bnx2 *bp)
8522 {
8523         int i;
8524
8525         for (i = 0; i < bp->irq_nvecs; i++) {
8526                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8527                 int (*poll)(struct napi_struct *, int);
8528
8529                 if (i == 0)
8530                         poll = bnx2_poll;
8531                 else
8532                         poll = bnx2_poll_msix;
8533
8534                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8535                 bnapi->bp = bp;
8536         }
8537 }
8538
/* net_device callbacks wiring the stack's netdev operations to the
 * bnx2 implementations.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8555
/* PCI probe callback: allocate the netdev, initialize the board,
 * configure feature flags and register the interface.  Returns 0 on
 * success or a negative errno; on failure everything acquired here is
 * released before returning.
 */
static int
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnx2 *bp;
	int rc;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0)
		goto err_free;

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/*
	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
	 * New io-page table has been created before bnx2 does reset at open stage.
	 * We have to wait for the in-flight DMA to complete to avoid it look up
	 * into the newly created io-page table.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);

	/* permanent MAC address was read from shmem by bnx2_init_board() */
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	/* only the 5709 gets IPv6 checksum offload and TSO6 */
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= dev->hw_features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
		    "node addr %pM\n", board_info[ent->driver_data].name,
		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
		    pdev->irq, dev->dev_addr);

	return 0;

error:
	/* undo what bnx2_init_board() set up */
	pci_iounmap(pdev, bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_free:
	bnx2_free_stats_blk(dev);
	free_netdev(dev);
	return rc;
}
8632
/* PCI remove callback: unregister the netdev and release, roughly in
 * reverse order of acquisition, all resources taken at probe time.
 */
static void
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* no timer or reset_task activity may remain past this point */
	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	pci_iounmap(bp->pdev, bp->regview);

	bnx2_free_stats_blk(dev);
	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	bnx2_release_firmware(bp);

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
8661
8662 #ifdef CONFIG_PM_SLEEP
/* System suspend: if the interface is up, stop all async activity,
 * quiesce the hardware and free its resources; then arm Wake-on-LAN
 * per the configured settings.  Always returns 0.
 */
static int
bnx2_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev)) {
		/* stop the reset worker before touching the hardware */
		cancel_work_sync(&bp->reset_task);
		bnx2_netif_stop(bp, true);
		netif_device_detach(dev);
		del_timer_sync(&bp->timer);
		bnx2_shutdown_chip(bp);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
	}
	bnx2_setup_wol(bp);
	return 0;
}
8681
/* System resume: power the chip back to D0 and, if the interface was
 * running at suspend time, reinitialize the NIC and restart traffic.
 * Always returns 0.
 */
static int
bnx2_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_request_irq(bp);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8698
8699 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8700 #define BNX2_PM_OPS (&bnx2_pm_ops)
8701
8702 #else
8703
8704 #define BNX2_PM_OPS NULL
8705
8706 #endif /* CONFIG_PM_SLEEP */
8707 /**
8708  * bnx2_io_error_detected - called when PCI error is detected
8709  * @pdev: Pointer to PCI device
8710  * @state: The current pci connection state
8711  *
8712  * This function is called after a PCI bus error affecting
8713  * this device has been detected.
8714  */
8715 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8716                                                pci_channel_state_t state)
8717 {
8718         struct net_device *dev = pci_get_drvdata(pdev);
8719         struct bnx2 *bp = netdev_priv(dev);
8720
8721         rtnl_lock();
8722         netif_device_detach(dev);
8723
8724         if (state == pci_channel_io_perm_failure) {
8725                 rtnl_unlock();
8726                 return PCI_ERS_RESULT_DISCONNECT;
8727         }
8728
8729         if (netif_running(dev)) {
8730                 bnx2_netif_stop(bp, true);
8731                 del_timer_sync(&bp->timer);
8732                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8733         }
8734
8735         pci_disable_device(pdev);
8736         rtnl_unlock();
8737
8738         /* Request a slot slot reset. */
8739         return PCI_ERS_RESULT_NEED_RESET;
8740 }
8741
8742 /**
8743  * bnx2_io_slot_reset - called after the pci bus has been reset.
8744  * @pdev: Pointer to PCI device
8745  *
8746  * Restart the card from scratch, as if from a cold-boot.
8747  */
8748 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8749 {
8750         struct net_device *dev = pci_get_drvdata(pdev);
8751         struct bnx2 *bp = netdev_priv(dev);
8752         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8753         int err = 0;
8754
8755         rtnl_lock();
8756         if (pci_enable_device(pdev)) {
8757                 dev_err(&pdev->dev,
8758                         "Cannot re-enable PCI device after reset\n");
8759         } else {
8760                 pci_set_master(pdev);
8761                 pci_restore_state(pdev);
8762                 pci_save_state(pdev);
8763
8764                 if (netif_running(dev))
8765                         err = bnx2_init_nic(bp, 1);
8766
8767                 if (!err)
8768                         result = PCI_ERS_RESULT_RECOVERED;
8769         }
8770
8771         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8772                 bnx2_napi_enable(bp);
8773                 dev_close(dev);
8774         }
8775         rtnl_unlock();
8776
8777         if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
8778                 return result;
8779
8780         return result;
8781 }
8782
8783 /**
8784  * bnx2_io_resume - called when traffic can start flowing again.
8785  * @pdev: Pointer to PCI device
8786  *
8787  * This callback is called when the error recovery driver tells us that
8788  * its OK to resume normal operation.
8789  */
8790 static void bnx2_io_resume(struct pci_dev *pdev)
8791 {
8792         struct net_device *dev = pci_get_drvdata(pdev);
8793         struct bnx2 *bp = netdev_priv(dev);
8794
8795         rtnl_lock();
8796         if (netif_running(dev))
8797                 bnx2_netif_start(bp, true);
8798
8799         netif_device_attach(dev);
8800         rtnl_unlock();
8801 }
8802
8803 static void bnx2_shutdown(struct pci_dev *pdev)
8804 {
8805         struct net_device *dev = pci_get_drvdata(pdev);
8806         struct bnx2 *bp;
8807
8808         if (!dev)
8809                 return;
8810
8811         bp = netdev_priv(dev);
8812         if (!bp)
8813                 return;
8814
8815         rtnl_lock();
8816         if (netif_running(dev))
8817                 dev_close(bp->dev);
8818
8819         if (system_state == SYSTEM_POWER_OFF)
8820                 bnx2_set_power_state(bp, PCI_D3hot);
8821
8822         rtnl_unlock();
8823 }
8824
/* PCI error recovery (AER) callbacks. */
static const struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8830
/* Driver registration with the PCI core; module init/exit boilerplate
 * is generated by module_pci_driver().
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= bnx2_remove_one,
	.driver.pm	= BNX2_PM_OPS,
	.err_handler	= &bnx2_err_handler,
	.shutdown	= bnx2_shutdown,
};

module_pci_driver(bnx2_pci_driver);