/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |    */
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
        "Option to enable MPI firmware dump. "
        "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
        "Option to allow force of firmware core dump. "
        "Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
    {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
    {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
    /* required last entry */
    {0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
    u32 sem_bits = 0;

    switch (sem_mask) {
    case SEM_XGMAC0_MASK:
        sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
        break;
    case SEM_XGMAC1_MASK:
        sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
        break;
    case SEM_ICB_MASK:
        sem_bits = SEM_SET << SEM_ICB_SHIFT;
        break;
    case SEM_MAC_ADDR_MASK:
        sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
        break;
    case SEM_FLASH_MASK:
        sem_bits = SEM_SET << SEM_FLASH_SHIFT;
        break;
    case SEM_PROBE_MASK:
        sem_bits = SEM_SET << SEM_PROBE_SHIFT;
        break;
    case SEM_RT_IDX_MASK:
        sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
        break;
    case SEM_PROC_REG_MASK:
        sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
        break;
    default:
        netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
        return -EINVAL;
    }

    ql_write32(qdev, SEM, sem_bits | sem_mask);
    return !(ql_read32(qdev, SEM) & sem_bits);
}
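/* Acquire a hardware semaphore by retrying ql_sem_trylock() for a bounded
 * number of attempts, delaying briefly between tries. Returns 0 on success.
 */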
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
    unsigned int wait_count = 30;

    do {
        if (!ql_sem_trylock(qdev, sem_mask))
            return 0;
        udelay(100);
    } while (--wait_count);
    return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
    ql_write32(qdev, SEM, sem_mask);
    ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
    u32 temp;
    int count = UDELAY_COUNT;

    while (count) {
        temp = ql_read32(qdev, reg);

        /* check for errors */
        if (temp & err_bit) {
            netif_alert(qdev, probe, qdev->ndev,
                    "register 0x%.08x access error, value = 0x%.08x!.\n",
                    reg, temp);
            return -EIO;
        } else if (temp & bit)
            return 0;
        udelay(UDELAY_DELAY);
        count--;
    }
    netif_alert(qdev, probe, qdev->ndev,
            "Timed out waiting for reg %x to come ready.\n", reg);
    return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
    int count = UDELAY_COUNT;
    u32 temp;

    while (count) {
        temp = ql_read32(qdev, CFG);
        if (!(temp & bit))
            return 0;
        udelay(UDELAY_DELAY);
        count--;
    }
    return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
         u16 q_id)
{
    u64 map;
    int status = 0;
    int direction;
    u32 mask;
    u32 value;

    direction =
        (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
        PCI_DMA_FROMDEVICE;

    map = pci_map_single(qdev->pdev, ptr, size, direction);
    if (pci_dma_mapping_error(qdev->pdev, map)) {
        netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
        return -ENOMEM;
    }

    status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
    if (status)
        return status;

    status = ql_wait_cfg(qdev, bit);
    if (status) {
        netif_err(qdev, ifup, qdev->ndev,
              "Timed out waiting for CFG to come ready.\n");
        goto exit;
    }

    ql_write32(qdev, ICB_L, (u32) map);
    ql_write32(qdev, ICB_H, (u32) (map >> 32));

    mask = CFG_Q_MASK | (bit << 16);
    value = bit | (q_id << CFG_Q_SHIFT);
    ql_write32(qdev, CFG, (mask | value));

    /*
     * Wait for the bit to clear after signaling hw.
     */
    status = ql_wait_cfg(qdev, bit);
exit:
    ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
    pci_unmap_single(qdev->pdev, map, size, direction);
    return status;
}
/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
    case MAC_ADDR_TYPE_MULTI_MAC:
    case MAC_ADDR_TYPE_CAM_MAC:
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
               MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MR, 0);
        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
               MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MR, 0);
        *value++ = ql_read32(qdev, MAC_ADDR_DATA);
        if (type == MAC_ADDR_TYPE_CAM_MAC) {
            ql_wait_reg_rdy(qdev,
                MAC_ADDR_IDX, MAC_ADDR_MW, 0);
            ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
                   (index << MAC_ADDR_IDX_SHIFT) | /* index */
                   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
            ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
            *value++ = ql_read32(qdev, MAC_ADDR_DATA);
    case MAC_ADDR_TYPE_VLAN:
    case MAC_ADDR_TYPE_MULTI_FLTR:
        netif_crit(qdev, ifup, qdev->ndev,
               "Address type %d not yet supported.\n", type);
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
    case MAC_ADDR_TYPE_MULTI_MAC:
        u32 upper = (addr[0] << 8) | addr[1];
        u32 lower = (addr[2] << 24) | (addr[3] << 16) |
                (addr[4] << 8) | (addr[5]);

        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
               (index << MAC_ADDR_IDX_SHIFT) |
        ql_write32(qdev, MAC_ADDR_DATA, lower);
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
               (index << MAC_ADDR_IDX_SHIFT) |
        ql_write32(qdev, MAC_ADDR_DATA, upper);
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
    case MAC_ADDR_TYPE_CAM_MAC:
        u32 upper = (addr[0] << 8) | addr[1];
            (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
        ql_write32(qdev, MAC_ADDR_DATA, lower);
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
        ql_write32(qdev, MAC_ADDR_DATA, upper);
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
        /* This field should also include the queue id
           and possibly the function id.  Right now we hardcode
           the route field to NIC core.
         */
        cam_output = (CAM_OUT_ROUTE_NIC |
                  func << CAM_OUT_FUNC_SHIFT) |
                  (0 << CAM_OUT_CQ_ID_SHIFT));
        if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
            cam_output |= CAM_OUT_RV;
        /* route to NIC core */
        ql_write32(qdev, MAC_ADDR_DATA, cam_output);
    case MAC_ADDR_TYPE_VLAN:
        u32 enable_bit = *((u32 *) &addr[0]);
        /* For VLAN, the addr actually holds a bit that
         * either enables or disables the vlan id we are
         * addressing. It's either MAC_ADDR_E on or off.
         * That's bit-27 we're talking about.
         */
        ql_wait_reg_rdy(qdev,
            MAC_ADDR_IDX, MAC_ADDR_MW, 0);
        ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
               (index << MAC_ADDR_IDX_SHIFT) | /* index */
               enable_bit); /* enable/disable */
    case MAC_ADDR_TYPE_MULTI_FLTR:
        netif_crit(qdev, ifup, qdev->ndev,
               "Address type %d not yet supported.\n", type);
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
    char zero_mac_addr[ETH_ALEN];
        addr = &qdev->current_mac_addr[0];
        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                 "Set Mac addr %pM\n", addr);
        eth_zero_addr(zero_mac_addr);
        addr = &zero_mac_addr[0];
        netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
                 "Clearing MAC address\n");
    status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
    status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
                     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
    ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        netif_err(qdev, ifup, qdev->ndev,
              "Failed to init mac address.\n");
void ql_link_on(struct ql_adapter *qdev)
{
    netif_err(qdev, link, qdev->ndev, "Link is up.\n");
    netif_carrier_on(qdev->ndev);
    ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
    netif_err(qdev, link, qdev->ndev, "Link is down.\n");
    netif_carrier_off(qdev->ndev);
    ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
    status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
    ql_write32(qdev, RT_IDX,
           RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
    status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
    *value = ql_read32(qdev, RT_DATA);
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
    int status = -EINVAL; /* Return error if no mask match. */
        value = RT_IDX_DST_CAM_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_VALID:	/* Promiscuous Mode frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_IP_CSUM_ERR_SLOT <<
            RT_IDX_IDX_SHIFT); /* index */
    case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
            RT_IDX_IDX_SHIFT); /* index */
    case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
        value = RT_IDX_DST_RSS |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
    case 0:		/* Clear the E-bit on an entry. */
        value = RT_IDX_DST_DFLT_Q |	/* dest */
            RT_IDX_TYPE_NICQ |	/* type */
            (index << RT_IDX_IDX_SHIFT);/* index */
        netif_err(qdev, ifup, qdev->ndev,
              "Mask type %d not yet supported.\n", mask);
        status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
        value |= (enable ? RT_IDX_E : 0);
        ql_write32(qdev, RT_IDX, value);
        ql_write32(qdev, RT_DATA, enable ? mask : 0);
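/* Global interrupt control: set or clear the external interrupt enable
 * bit (INTR_EN_EI) for the whole chip.
 */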
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
    ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
    ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
    unsigned long hw_flags = 0;
    struct intr_context *ctx = qdev->intr_context + intr;

    if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
        /* Always enable if we're MSIX multi interrupts and
         * it's not the default (zeroeth) interrupt.
         */
        ql_write32(qdev, INTR_EN,
        var = ql_read32(qdev, STS);
    spin_lock_irqsave(&qdev->hw_lock, hw_flags);
    if (atomic_dec_and_test(&ctx->irq_cnt)) {
        ql_write32(qdev, INTR_EN,
        var = ql_read32(qdev, STS);
    spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
    struct intr_context *ctx;

    /* HW disables for us if we're MSIX multi interrupts and
     * it's not the default (zeroeth) interrupt.
     */
    if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
    ctx = qdev->intr_context + intr;
    spin_lock(&qdev->hw_lock);
    if (!atomic_read(&ctx->irq_cnt)) {
        ql_write32(qdev, INTR_EN,
        var = ql_read32(qdev, STS);
    atomic_inc(&ctx->irq_cnt);
    spin_unlock(&qdev->hw_lock);

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
    for (i = 0; i < qdev->intr_count; i++) {
        /* The enable call does an atomic_dec_and_test
         * and enables only if the result is zero.
         * So we precharge it here.
         */
        if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
            atomic_set(&qdev->intr_context[i].irq_cnt, 1);
        ql_enable_completion_interrupt(qdev, i);
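/* Validate a flash image that has been read into qdev->flash: check the
 * four-character signature and verify the 16-bit word checksum over the
 * given size.
 */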
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
    __le16 *flash = (__le16 *)&qdev->flash;

    status = strncmp((char *)&qdev->flash, str, 4);
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");

    for (i = 0; i < size; i++)
        csum += le16_to_cpu(*flash++);

        netif_err(qdev, ifup, qdev->ndev,
              "Invalid flash checksum, csum = 0x%.04x.\n", csum);
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
    /* set up for reg read */
    ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
    /* This data is stored on flash as an array of
     * __le32.  Since ql_read32() returns cpu endian
     * we need to swap it back.
     */
    *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
    __le32 *p = (__le32 *)&qdev->flash;

    /* Get flash offset for function and adjust
     */
        offset = FUNC0_FLASH_OFFSET / sizeof(u32);
        offset = FUNC1_FLASH_OFFSET / sizeof(u32);

    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))

    size = sizeof(struct flash_params_8000) / sizeof(u32);
    for (i = 0; i < size; i++, p++) {
        status = ql_read_flash_word(qdev, i+offset, p);
            netif_err(qdev, ifup, qdev->ndev,
                  "Error reading flash.\n");

    status = ql_validate_flash(qdev,
            sizeof(struct flash_params_8000) / sizeof(u16),
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");

    /* Extract either manufacturer or BOFM modified
     */
    if (qdev->flash.flash_params_8000.data_type1 == 2)
            qdev->flash.flash_params_8000.mac_addr1,
            qdev->ndev->addr_len);
            qdev->flash.flash_params_8000.mac_addr,
            qdev->ndev->addr_len);

    if (!is_valid_ether_addr(mac_addr)) {
        netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");

    memcpy(qdev->ndev->dev_addr,
           qdev->ndev->addr_len);

    ql_sem_unlock(qdev, SEM_FLASH_MASK);
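/* Read and validate the 8012 flash parameter block, then copy the MAC
 * address it contains to the netdev.
 */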
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
    __le32 *p = (__le32 *)&qdev->flash;
    u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

    /* Second function's parameters follow the first
     */

    if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))

    for (i = 0; i < size; i++, p++) {
        status = ql_read_flash_word(qdev, i+offset, p);
            netif_err(qdev, ifup, qdev->ndev,
                  "Error reading flash.\n");

    status = ql_validate_flash(qdev,
            sizeof(struct flash_params_8012) / sizeof(u16),
        netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");

    if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {

    memcpy(qdev->ndev->dev_addr,
           qdev->flash.flash_params_8012.mac_addr,
           qdev->ndev->addr_len);

    ql_sem_unlock(qdev, SEM_FLASH_MASK);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
    /* write the data to the data reg */
    ql_write32(qdev, XGMAC_DATA, data);
    /* trigger the write */
    ql_write32(qdev, XGMAC_ADDR, reg);
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
    /* set up for reg read */
    ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
    /* wait for reg to come ready */
    status = ql_wait_reg_rdy(qdev,
            XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
    *data = ql_read32(qdev, XGMAC_DATA);
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
    status = ql_read_xgmac_reg(qdev, reg, &lo);
    status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
    *data = (u64) lo | ((u64) hi << 32);
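/* For 8000-series devices, query the MPI firmware version and state, then
 * schedule the port configuration worker to negotiate the TX/RX frame sizes.
 */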
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
    /*
     * Get MPI firmware version for driver banner
     */
    status = ql_mb_about_fw(qdev);
    status = ql_mb_get_fw_state(qdev);
    /* Wake up a worker to get/set the TX/RX frame sizes. */
    queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
    if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
        /* Another function has the semaphore, so
         * wait for the port init bit to come ready.
         */
        netif_info(qdev, link, qdev->ndev,
               "Another function has the semaphore, so wait for the port init bit to come ready.\n");
        status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
            netif_crit(qdev, link, qdev->ndev,
                   "Port initialize timed out.\n");

    netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
    /* Set the core reset. */
    status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
    data |= GLOBAL_CFG_RESET;
    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

    /* Clear the core reset and turn on jumbo for receiver. */
    data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
    data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
    data |= GLOBAL_CFG_TX_STAT_EN;
    data |= GLOBAL_CFG_RX_STAT_EN;
    status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);

    /* Enable transmitter, and clear its reset. */
    status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
    data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
    data |= TX_CFG_EN;	/* Enable the transmitter. */
    status = ql_write_xgmac_reg(qdev, TX_CFG, data);

    /* Enable receiver and clear its reset. */
    status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
    data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
    data |= RX_CFG_EN;	/* Enable the receiver. */
    status = ql_write_xgmac_reg(qdev, RX_CFG, data);

    /* Turn on jumbo. */
        ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
        ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);

    /* Signal to the world that the port is enabled. */
    ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
    ql_sem_unlock(qdev, qdev->xg_sem_mask);
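/* Size in bytes of one large-buffer master page: a single page scaled by
 * the configured allocation order.
 */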
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
    return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
    struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
    rx_ring->lbq_curr_idx++;
    if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
        rx_ring->lbq_curr_idx = 0;
    rx_ring->lbq_free_cnt++;
    return lbq_desc;

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
        struct rx_ring *rx_ring)
    struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

    pci_dma_sync_single_for_cpu(qdev->pdev,
                    dma_unmap_addr(lbq_desc, mapaddr),
                    rx_ring->lbq_buf_size,
                    PCI_DMA_FROMDEVICE);

    /* If it's the last chunk of our master page then
     * we unmap it.
     */
    if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
                    == ql_lbq_block_size(qdev))
        pci_unmap_page(qdev->pdev,
                lbq_desc->p.pg_chunk.map,
                ql_lbq_block_size(qdev),
                PCI_DMA_FROMDEVICE);
    return lbq_desc;
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
    struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
    rx_ring->sbq_curr_idx++;
    if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
        rx_ring->sbq_curr_idx = 0;
    rx_ring->sbq_free_cnt++;
    return sbq_desc;

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
    rx_ring->cnsmr_idx++;
    rx_ring->curr_entry++;
    if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
        rx_ring->cnsmr_idx = 0;
        rx_ring->curr_entry = rx_ring->cq_base;
    }

static void ql_write_cq_idx(struct rx_ring *rx_ring)
    ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
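/* Hand out the next lbq_buf_size chunk of the current master page to a
 * large buffer descriptor, allocating and DMA-mapping a fresh page when
 * the previous one has been fully carved up.
 */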
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
                 struct bq_desc *lbq_desc)
    if (!rx_ring->pg_chunk.page) {
        rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
                             qdev->lbq_buf_order);
        if (unlikely(!rx_ring->pg_chunk.page)) {
            netif_err(qdev, drv, qdev->ndev,
                  "page allocation failed.\n");
        rx_ring->pg_chunk.offset = 0;
        map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
                   0, ql_lbq_block_size(qdev),
                   PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(qdev->pdev, map)) {
            __free_pages(rx_ring->pg_chunk.page,
                     qdev->lbq_buf_order);
            rx_ring->pg_chunk.page = NULL;
            netif_err(qdev, drv, qdev->ndev,
                  "PCI mapping failed.\n");
        rx_ring->pg_chunk.map = map;
        rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);

    /* Copy the current master pg_chunk info
     * to the current descriptor.
     */
    lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

    /* Adjust the master page chunk for next
     */
    rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
    if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
        rx_ring->pg_chunk.page = NULL;
        lbq_desc->p.pg_chunk.last_flag = 1;
        rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
        get_page(rx_ring->pg_chunk.page);
        lbq_desc->p.pg_chunk.last_flag = 0;
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
    u32 clean_idx = rx_ring->lbq_clean_idx;
    u32 start_idx = clean_idx;
    struct bq_desc *lbq_desc;

    while (rx_ring->lbq_free_cnt > 32) {
        for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "lbq: try cleaning clean_idx = %d.\n",
            lbq_desc = &rx_ring->lbq[clean_idx];
            if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
                rx_ring->lbq_clean_idx = clean_idx;
                netif_err(qdev, ifup, qdev->ndev,
                      "Could not get a page chunk, i=%d, clean_idx =%d .\n",

            map = lbq_desc->p.pg_chunk.map +
                lbq_desc->p.pg_chunk.offset;
            dma_unmap_addr_set(lbq_desc, mapaddr, map);
            dma_unmap_len_set(lbq_desc, maplen,
                      rx_ring->lbq_buf_size);
            *lbq_desc->addr = cpu_to_le64(map);

            pci_dma_sync_single_for_device(qdev->pdev, map,
                           rx_ring->lbq_buf_size,
                           PCI_DMA_FROMDEVICE);
            if (clean_idx == rx_ring->lbq_len)
        rx_ring->lbq_clean_idx = clean_idx;
        rx_ring->lbq_prod_idx += 16;
        if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
            rx_ring->lbq_prod_idx = 0;
        rx_ring->lbq_free_cnt -= 16;

    if (start_idx != clean_idx) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "lbq: updating prod idx = %d.\n",
                 rx_ring->lbq_prod_idx);
        ql_write_db_reg(rx_ring->lbq_prod_idx,
                rx_ring->lbq_prod_idx_db_reg);
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
    u32 clean_idx = rx_ring->sbq_clean_idx;
    u32 start_idx = clean_idx;
    struct bq_desc *sbq_desc;

    while (rx_ring->sbq_free_cnt > 16) {
        for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
            sbq_desc = &rx_ring->sbq[clean_idx];
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "sbq: try cleaning clean_idx = %d.\n",
            if (sbq_desc->p.skb == NULL) {
                netif_printk(qdev, rx_status, KERN_DEBUG,
                         "sbq: getting new skb for index %d.\n",
                    netdev_alloc_skb(qdev->ndev,
                if (sbq_desc->p.skb == NULL) {
                    rx_ring->sbq_clean_idx = clean_idx;
                skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
                map = pci_map_single(qdev->pdev,
                             sbq_desc->p.skb->data,
                             rx_ring->sbq_buf_size,
                             PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(qdev->pdev, map)) {
                    netif_err(qdev, ifup, qdev->ndev,
                          "PCI mapping failed.\n");
                    rx_ring->sbq_clean_idx = clean_idx;
                    dev_kfree_skb_any(sbq_desc->p.skb);
                    sbq_desc->p.skb = NULL;
                dma_unmap_addr_set(sbq_desc, mapaddr, map);
                dma_unmap_len_set(sbq_desc, maplen,
                          rx_ring->sbq_buf_size);
                *sbq_desc->addr = cpu_to_le64(map);
            if (clean_idx == rx_ring->sbq_len)
        rx_ring->sbq_clean_idx = clean_idx;
        rx_ring->sbq_prod_idx += 16;
        if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
            rx_ring->sbq_prod_idx = 0;
        rx_ring->sbq_free_cnt -= 16;

    if (start_idx != clean_idx) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "sbq: updating prod idx = %d.\n",
                 rx_ring->sbq_prod_idx);
        ql_write_db_reg(rx_ring->sbq_prod_idx,
                rx_ring->sbq_prod_idx_db_reg);
static void ql_update_buffer_queues(struct ql_adapter *qdev,
                    struct rx_ring *rx_ring)
{
    ql_update_sbq(qdev, rx_ring);
    ql_update_lbq(qdev, rx_ring);
}
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
              struct tx_ring_desc *tx_ring_desc, int mapped)
    for (i = 0; i < mapped; i++) {
        if (i == 0 || (i == 7 && mapped > 7)) {
            /*
             * Unmap the skb->data area, or the
             * external sglist (AKA the Outbound
             * Address List (OAL)).
             * If it's the zeroeth element, then it's
             * the skb->data area.  If it's the 7th
             * element and there are more than 6 frags,
             */
            netif_printk(qdev, tx_done, KERN_DEBUG,
                     "unmapping OAL area.\n");
            pci_unmap_single(qdev->pdev,
                     dma_unmap_addr(&tx_ring_desc->map[i],
                     dma_unmap_len(&tx_ring_desc->map[i],
            netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
                     "unmapping frag %d.\n", i);
            pci_unmap_page(qdev->pdev,
                       dma_unmap_addr(&tx_ring_desc->map[i],
                       dma_unmap_len(&tx_ring_desc->map[i],
                       maplen), PCI_DMA_TODEVICE);
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
               struct ob_mac_iocb_req *mac_iocb_ptr,
               struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
    int len = skb_headlen(skb);
    int frag_idx, err, map_idx = 0;
    struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
    int frag_cnt = skb_shinfo(skb)->nr_frags;

    netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
             "frag_cnt = %d.\n", frag_cnt);
    /*
     * Map the skb buffer first.
     */
    map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

    err = pci_dma_mapping_error(qdev->pdev, map);
        netif_err(qdev, tx_queued, qdev->ndev,
              "PCI mapping failed with error: %d\n", err);
        return NETDEV_TX_BUSY;

    tbd->len = cpu_to_le32(len);
    tbd->addr = cpu_to_le64(map);
    dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
    dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);

    /*
     * This loop fills the remainder of the 8 address descriptors
     * in the IOCB.  If there are more than 7 fragments, then the
     * eighth address desc will point to an external list (OAL).
     * When this happens, the remainder of the frags will be stored
     */
    for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
        if (frag_idx == 6 && frag_cnt > 7) {
            /* Let's tack on an sglist.
             * Our control block will now
             *      iocb->seg[0] = skb->data
             *      iocb->seg[1] = frag[0]
             *      iocb->seg[2] = frag[1]
             *      iocb->seg[3] = frag[2]
             *      iocb->seg[4] = frag[3]
             *      iocb->seg[5] = frag[4]
             *      iocb->seg[6] = frag[5]
             *      iocb->seg[7] = ptr to OAL (external sglist)
             *      oal->seg[0] = frag[6]
             *      oal->seg[1] = frag[7]
             *      oal->seg[2] = frag[8]
             *      oal->seg[3] = frag[9]
             *      oal->seg[4] = frag[10]
             */
            /* Tack on the OAL in the eighth segment of IOCB. */
            map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
            err = pci_dma_mapping_error(qdev->pdev, map);
                netif_err(qdev, tx_queued, qdev->ndev,
                      "PCI mapping outbound address list with error: %d\n",
            tbd->addr = cpu_to_le64(map);
            /*
             * The length is the number of fragments
             * that remain to be mapped times the length
             * of our sglist (OAL).
             */
                cpu_to_le32((sizeof(struct tx_buf_desc) *
                         (frag_cnt - frag_idx)) | TX_DESC_C);
            dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
            dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                      sizeof(struct oal));
            tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;

        map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),

        err = dma_mapping_error(&qdev->pdev->dev, map);
            netif_err(qdev, tx_queued, qdev->ndev,
                  "PCI mapping frags failed with error: %d.\n",

        tbd->addr = cpu_to_le64(map);
        tbd->len = cpu_to_le32(skb_frag_size(frag));
        dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
        dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
                  skb_frag_size(frag));

    /* Save the number of segments we've mapped. */
    tx_ring_desc->map_cnt = map_idx;
    /* Terminate the last segment. */
    tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
    return NETDEV_TX_OK;

    /*
     * If the first frag mapping failed, then i will be zero.
     * This causes the unmap of the skb->data area.  Otherwise
     * we pass in the number of frags that mapped successfully
     * so they can be unmapped.
     */
    ql_unmap_send(qdev, tx_ring_desc, map_idx);
    return NETDEV_TX_BUSY;
/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
                 struct rx_ring *rx_ring)
    struct nic_stats *stats = &qdev->nic_stats;

    stats->rx_err_count++;
    rx_ring->rx_errors++;

    switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
    case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
        stats->rx_code_err++;
        break;
    case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
        stats->rx_oversize_err++;
        break;
    case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
        stats->rx_undersize_err++;
        break;
    case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
        stats->rx_preamble_err++;
        break;
    case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
        stats->rx_frame_len_err++;
        break;
    case IB_MAC_IOCB_RSP_ERR_CRC:
        stats->rx_crc_err++;
/*
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
                  struct ib_mac_iocb_rsp *ib_mac_rsp,
                  void *page, size_t *len)
    if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
        /* Look for stacked vlan tags in ethertype field */
        if (tags[6] == ETH_P_8021Q &&
            tags[8] == ETH_P_8021Q)
            *len += 2 * VLAN_HLEN;
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                       struct rx_ring *rx_ring,
                       struct ib_mac_iocb_rsp *ib_mac_rsp,
    struct sk_buff *skb;
    struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    struct napi_struct *napi = &rx_ring->napi;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        put_page(lbq_desc->p.pg_chunk.page);

    napi->dev = qdev->ndev;

    skb = napi_get_frags(napi);
        netif_err(qdev, drv, qdev->ndev,
              "Couldn't get an skb, exiting.\n");
        rx_ring->rx_dropped++;
        put_page(lbq_desc->p.pg_chunk.page);
    prefetch(lbq_desc->p.pg_chunk.va);
    __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                 lbq_desc->p.pg_chunk.page,
                 lbq_desc->p.pg_chunk.offset,
    skb->data_len += length;
    skb->truesize += length;
    skb_shinfo(skb)->nr_frags++;

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += length;
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    napi_gro_frags(napi);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                   struct rx_ring *rx_ring,
                   struct ib_mac_iocb_rsp *ib_mac_rsp,
    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb = NULL;
    struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
    struct napi_struct *napi = &rx_ring->napi;
    size_t hlen = ETH_HLEN;

    skb = netdev_alloc_skb(ndev, length);
        rx_ring->rx_dropped++;
        put_page(lbq_desc->p.pg_chunk.page);

    addr = lbq_desc->p.pg_chunk.va;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);

    /* Update the MAC header length*/
    ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + hlen) {
        netif_err(qdev, drv, qdev->ndev,
              "Segment too small, dropping.\n");
        rx_ring->rx_dropped++;
    memcpy(skb_put(skb, hlen), addr, hlen);
    netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
             "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
    skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
               lbq_desc->p.pg_chunk.offset + hlen,
    skb->len += length - hlen;
    skb->data_len += length - hlen;
    skb->truesize += length - hlen;

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
                (struct iphdr *)((u8 *)addr + hlen);
            if (!(iph->frag_off &
                htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG,
                         "UDP checksum done!\n");

    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(napi, skb);
        netif_receive_skb(skb);

    dev_kfree_skb_any(skb);
    put_page(lbq_desc->p.pg_chunk.page);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
                  struct rx_ring *rx_ring,
                  struct ib_mac_iocb_rsp *ib_mac_rsp,
    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb = NULL;
    struct sk_buff *new_skb = NULL;
    struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

    skb = sbq_desc->p.skb;
    /* Allocate new_skb and copy */
    new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
    if (new_skb == NULL) {
        rx_ring->rx_dropped++;
    skb_reserve(new_skb, NET_IP_ALIGN);

    pci_dma_sync_single_for_cpu(qdev->pdev,
                    dma_unmap_addr(sbq_desc, mapaddr),
                    dma_unmap_len(sbq_desc, maplen),
                    PCI_DMA_FROMDEVICE);

    memcpy(skb_put(new_skb, length), skb->data, length);

    pci_dma_sync_single_for_device(qdev->pdev,
                       dma_unmap_addr(sbq_desc, mapaddr),
                       dma_unmap_len(sbq_desc, maplen),
                       PCI_DMA_FROMDEVICE);

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        dev_kfree_skb_any(skb);

    /* loopback self test for ethtool */
    if (test_bit(QL_SELFTEST, &qdev->flags)) {
        ql_check_lb_frame(qdev, skb);
        dev_kfree_skb_any(skb);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + ETH_HLEN) {
        dev_kfree_skb_any(skb);
        rx_ring->rx_dropped++;

    prefetch(skb->data);
    if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "Promiscuous Packet.\n");

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    /* If rx checksum is on, and there are no
     * csum or frame errors.
     */
    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
            struct iphdr *iph = (struct iphdr *) skb->data;
            if (!(iph->frag_off &
                htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG,
                         "UDP checksum done!\n");

    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(&rx_ring->napi, skb);
        netif_receive_skb(skb);
static void ql_realign_skb(struct sk_buff *skb, int len)
    void *temp_addr = skb->data;

    /* Undo the skb_reserve(skb,32) we did before
     * giving to hardware, and realign data on
     * a 2-byte boundary.
     */
    skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
    skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
    skb_copy_to_linear_data(skb, temp_addr,
/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                       struct rx_ring *rx_ring,
                       struct ib_mac_iocb_rsp *ib_mac_rsp)
    struct bq_desc *lbq_desc;
    struct bq_desc *sbq_desc;
    struct sk_buff *skb = NULL;
    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
    u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
    size_t hlen = ETH_HLEN;

    /*
     * Handle the header buffer if present.
     */
    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
        ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "Header of %d bytes in small buffer.\n", hdr_len);
        /*
         * Headers fit nicely into a small buffer.
         */
        sbq_desc = ql_get_curr_sbuf(rx_ring);
        pci_unmap_single(qdev->pdev,
                 dma_unmap_addr(sbq_desc, mapaddr),
                 dma_unmap_len(sbq_desc, maplen),
                 PCI_DMA_FROMDEVICE);
        skb = sbq_desc->p.skb;
        ql_realign_skb(skb, hdr_len);
        skb_put(skb, hdr_len);
        sbq_desc->p.skb = NULL;

    /*
     * Handle the data buffer(s).
     */
    if (unlikely(!length)) {	/* Is there data too? */
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "No Data buffer in this packet.\n");

    if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "Headers in small, data of %d bytes in small, combine them.\n",
            /*
             * Data is less than small buffer size so it's
             * stuffed in a small buffer.
             * For this case we append the data
             * from the "data" small buffer to the "header" small
             */
            sbq_desc = ql_get_curr_sbuf(rx_ring);
            pci_dma_sync_single_for_cpu(qdev->pdev,
                            (sbq_desc, mapaddr),
                            PCI_DMA_FROMDEVICE);
            memcpy(skb_put(skb, length),
                   sbq_desc->p.skb->data, length);
            pci_dma_sync_single_for_device(qdev->pdev,
                            PCI_DMA_FROMDEVICE);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes in a single small buffer.\n",
            sbq_desc = ql_get_curr_sbuf(rx_ring);
            skb = sbq_desc->p.skb;
            ql_realign_skb(skb, length);
            skb_put(skb, length);
            pci_unmap_single(qdev->pdev,
                     dma_unmap_addr(sbq_desc,
                     dma_unmap_len(sbq_desc,
                     PCI_DMA_FROMDEVICE);
            sbq_desc->p.skb = NULL;
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "Header in small, %d bytes in large. Chain large to small!\n",
            /*
             * The data is in a single large buffer.  We
             * chain it to the header buffer's skb and let
             */
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "Chaining page at offset = %d, for %d bytes to skb.\n",
                     lbq_desc->p.pg_chunk.offset, length);
            skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset,
            skb->data_len += length;
            skb->truesize += length;
            /*
             * The headers and data are in a single large buffer. We
             * copy it to a new skb and let it go. This can happen with
             * jumbo mtu on a non-TCP/UDP frame.
             */
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            skb = netdev_alloc_skb(qdev->ndev, length);
                netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
                         "No skb available, drop the packet.\n");
            pci_unmap_page(qdev->pdev,
                       dma_unmap_addr(lbq_desc,
                       dma_unmap_len(lbq_desc, maplen),
                       PCI_DMA_FROMDEVICE);
            skb_reserve(skb, NET_IP_ALIGN);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
            skb_fill_page_desc(skb, 0,
                       lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset,
            skb->data_len += length;
            skb->truesize += length;
            ql_update_mac_hdr_len(qdev, ib_mac_rsp,
                          lbq_desc->p.pg_chunk.va,
            __pskb_pull_tail(skb, hlen);
        /*
         * The data is in a chain of large buffers
         * pointed to by a small buffer.  We loop
         * through and chain them to our small header
         * buffer's skb.
         * frags:  There are 18 max frags and our small
         * buffer will hold 32 of them. The thing is,
         * we'll use 3 max for our 9000 byte jumbo
         * frames.  If the MTU goes up we could
         * eventually be in trouble.
         */
        sbq_desc = ql_get_curr_sbuf(rx_ring);
        pci_unmap_single(qdev->pdev,
                 dma_unmap_addr(sbq_desc, mapaddr),
                 dma_unmap_len(sbq_desc, maplen),
                 PCI_DMA_FROMDEVICE);
        if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
            /*
             * This is a non-TCP/UDP IP frame, so
             * the headers aren't split into a small
             * buffer.  We have to use the small buffer
             * that contains our sg list as our skb to
             * send upstairs. Copy the sg list here to
             * a local buffer and use it to find the
             */
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes of headers & data in chain of large.\n",
            skb = sbq_desc->p.skb;
            sbq_desc->p.skb = NULL;
            skb_reserve(skb, NET_IP_ALIGN);
            lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
            size = (length < rx_ring->lbq_buf_size) ? length :
                rx_ring->lbq_buf_size;

            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "Adding page %d to skb for %d bytes.\n",
            skb_fill_page_desc(skb, i,
                       lbq_desc->p.pg_chunk.page,
                       lbq_desc->p.pg_chunk.offset,
            skb->data_len += size;
            skb->truesize += size;
        } while (length > 0);
        ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
        __pskb_pull_tail(skb, hlen);
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
                     struct rx_ring *rx_ring,
                     struct ib_mac_iocb_rsp *ib_mac_rsp,
    struct net_device *ndev = qdev->ndev;
    struct sk_buff *skb = NULL;

    QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

    skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
    if (unlikely(!skb)) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "No skb available, drop packet.\n");
        rx_ring->rx_dropped++;

    /* Frame error, so drop the packet. */
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
        ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
        dev_kfree_skb_any(skb);

    /* The max framesize filter on this chip is set higher than
     * MTU since FCoE uses 2k frames.
     */
    if (skb->len > ndev->mtu + ETH_HLEN) {
        dev_kfree_skb_any(skb);
        rx_ring->rx_dropped++;

    /* loopback self test for ethtool */
    if (test_bit(QL_SELFTEST, &qdev->flags)) {
        ql_check_lb_frame(qdev, skb);
        dev_kfree_skb_any(skb);

    prefetch(skb->data);
    if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
                 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
                 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
        rx_ring->rx_multicast++;
    if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "Promiscuous Packet.\n");

    skb->protocol = eth_type_trans(skb, ndev);
    skb_checksum_none_assert(skb);

    /* If rx checksum is on, and there are no
     * csum or frame errors.
     */
    if ((ndev->features & NETIF_F_RXCSUM) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "TCP checksum done!\n");
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
            /* Unfragmented ipv4 UDP frame. */
            struct iphdr *iph = (struct iphdr *) skb->data;
            if (!(iph->frag_off &
                htons(IP_MF|IP_OFFSET))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                         "UDP checksum done!\n");

    rx_ring->rx_packets++;
    rx_ring->rx_bytes += skb->len;
    skb_record_rx_queue(skb, rx_ring->cq_id);
    if (vlan_id != 0xffff)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
    if (skb->ip_summed == CHECKSUM_UNNECESSARY)
        napi_gro_receive(&rx_ring->napi, skb);
        netif_receive_skb(skb);
/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                        struct rx_ring *rx_ring,
                        struct ib_mac_iocb_rsp *ib_mac_rsp)
    u32 length = le32_to_cpu(ib_mac_rsp->data_len);
    u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
            (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
            ((le16_to_cpu(ib_mac_rsp->vlan_id) &
            IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

    QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

    if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
        /* The data and headers are split into
         */
        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
        /* The data fit in a single small buffer.
         * Allocate a new skb, copy the data and
         * return the buffer to the free pool.
         */
        ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
    } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
        !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
        (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
        /* TCP packet in a page chunk that's been checksummed.
         * Tack it on to our GRO skb and let it go.
         */
        ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
    } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
        /* Non-TCP packet in a page chunk. Allocate an
         * skb, tack it on frags, and send it up.
         */
        ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
        /* Non-TCP/UDP large frames that span multiple buffers
         * can be processed correctly by the split frame logic.
         */
        ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,

    return (unsigned long)length;
}
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
                   struct ob_mac_iocb_rsp *mac_rsp)
    struct tx_ring *tx_ring;
    struct tx_ring_desc *tx_ring_desc;

    QL_DUMP_OB_MAC_RSP(mac_rsp);
    tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
    tx_ring_desc = &tx_ring->q[mac_rsp->tid];
    ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
    tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
    tx_ring->tx_packets++;
    dev_kfree_skb(tx_ring_desc->skb);
    tx_ring_desc->skb = NULL;

    if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
                    OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
            netif_warn(qdev, tx_done, qdev->ndev,
                   "Total descriptor length did not match transfer length.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
            netif_warn(qdev, tx_done, qdev->ndev,
                   "Frame too short to be valid, not sent.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
            netif_warn(qdev, tx_done, qdev->ndev,
                   "Frame too long, but sent anyway.\n");
        if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
            netif_warn(qdev, tx_done, qdev->ndev,
                   "PCI backplane error. Frame not sent.\n");
    atomic_inc(&tx_ring->tx_count);
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
    queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
    ql_disable_interrupts(qdev);
    /* Clear adapter up bit to signal the recovery
     * process that it shouldn't kill the reset worker
     */
    clear_bit(QL_ADAPTER_UP, &qdev->flags);
    /* Set asic recovery bit to indicate reset process that we are
     * in fatal error recovery process rather than normal close
     */
    set_bit(QL_ASIC_RECOVERY, &qdev->flags);
    queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
                    struct ib_ae_iocb_rsp *ib_ae_rsp)
    switch (ib_ae_rsp->event) {
    case MGMT_ERR_EVENT:
        netif_err(qdev, rx_err, qdev->ndev,
              "Management Processor Fatal Error.\n");
        ql_queue_fw_error(qdev);

    case CAM_LOOKUP_ERR_EVENT:
        netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
        netdev_err(qdev->ndev, "This event shouldn't occur.\n");
        ql_queue_asic_error(qdev);

    case SOFT_ECC_ERROR_EVENT:
        netdev_err(qdev->ndev, "Soft ECC error detected.\n");
        ql_queue_asic_error(qdev);

    case PCI_ERR_ANON_BUF_RD:
        netdev_err(qdev->ndev, "PCI error occurred when reading "
                    "anonymous buffers from rx_ring %d.\n",
        ql_queue_asic_error(qdev);

        netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
        ql_queue_asic_error(qdev);
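/* Service TX (outbound) completions posted to this completion ring, then
 * wake the corresponding TX queue if it was stopped and has drained enough.
 */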
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
    struct ql_adapter *qdev = rx_ring->qdev;
    u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
    struct ob_mac_iocb_rsp *net_rsp = NULL;
    struct tx_ring *tx_ring;

    /* While there are entries in the completion queue. */
    while (prod != rx_ring->cnsmr_idx) {

        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
                 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

        net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
        switch (net_rsp->opcode) {

        case OPCODE_OB_MAC_TSO_IOCB:
        case OPCODE_OB_MAC_IOCB:
            ql_process_mac_tx_intr(qdev, net_rsp);
            netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
        ql_update_cq(rx_ring);
        prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

    ql_write_cq_idx(rx_ring);
    tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
    if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
        if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
            /*
             * The queue got stopped because the tx_ring was full.
             * Wake it up, because it's now at least 25% empty.
             */
            netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2253 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2255 struct ql_adapter *qdev = rx_ring->qdev;
2256 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2257 struct ql_net_rsp_iocb *net_rsp;
2260 /* While there are entries in the completion queue. */
2261 while (prod != rx_ring->cnsmr_idx) {
2263 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2265 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2267 net_rsp = rx_ring->curr_entry;
2269 switch (net_rsp->opcode) {
2270 case OPCODE_IB_MAC_IOCB:
2271 ql_process_mac_rx_intr(qdev, rx_ring,
2272 (struct ib_mac_iocb_rsp *)
2276 case OPCODE_IB_AE_IOCB:
2277 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2281 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2282 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2287 ql_update_cq(rx_ring);
2288 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2289 if (count == budget)
2292 ql_update_buffer_queues(qdev, rx_ring);
2293 ql_write_cq_idx(rx_ring);
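/* NAPI poll routine for an MSI-X vector.  Outbound completion rings
 * owned by this vector are drained first, then the RSS (inbound) ring
 * is serviced up to the NAPI budget.  When less than the full budget
 * is used, NAPI is completed and the completion interrupt re-enabled.
 */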
2297 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2299 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2300 struct ql_adapter *qdev = rx_ring->qdev;
2301 struct rx_ring *trx_ring;
2302 int i, work_done = 0;
2303 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2305 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2306 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2308 /* Service the TX rings first. They start
2309 * right after the RSS rings. */
2310 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2311 trx_ring = &qdev->rx_ring[i];
2312 /* If this TX completion ring belongs to this vector and
2313 * it's not empty then service it.
2315 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2316 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2317 trx_ring->cnsmr_idx)) {
2318 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2319 "%s: Servicing TX completion ring %d.\n",
2320 __func__, trx_ring->cq_id);
2321 ql_clean_outbound_rx_ring(trx_ring);
2326 * Now service the RSS ring if it's active.
2328 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2329 rx_ring->cnsmr_idx) {
2330 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2331 "%s: Servicing RX completion ring %d.\n",
2332 __func__, rx_ring->cq_id);
2333 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2336 if (work_done < budget) {
2337 napi_complete(napi);
2338 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2343 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2345 struct ql_adapter *qdev = netdev_priv(ndev);
2347 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2349 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2351 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2356 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2357 * based on the features to enable/disable hardware vlan accel
2359 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2360 netdev_features_t features)
2362 struct ql_adapter *qdev = netdev_priv(ndev);
2364 bool need_restart = netif_running(ndev);
2367 status = ql_adapter_down(qdev);
2369 netif_err(qdev, link, qdev->ndev,
2370 "Failed to bring down the adapter\n");
2375 /* Update the features with the recent change. */
2376 ndev->features = features;
2379 status = ql_adapter_up(qdev);
2381 netif_err(qdev, link, qdev->ndev,
2382 "Failed to bring up the adapter\n");
2390 static int qlge_set_features(struct net_device *ndev,
2391 netdev_features_t features)
2393 netdev_features_t changed = ndev->features ^ features;
2396 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2397 /* Update the behavior of vlan accel in the adapter */
2398 err = qlge_update_hw_vlan_features(ndev, features);
2402 qlge_vlan_mode(ndev, features);
2408 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2410 u32 enable_bit = MAC_ADDR_E;
2413 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2414 MAC_ADDR_TYPE_VLAN, vid);
2416 netif_err(qdev, ifup, qdev->ndev,
2417 "Failed to init vlan address.\n");
2421 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2423 struct ql_adapter *qdev = netdev_priv(ndev);
2427 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2431 err = __qlge_vlan_rx_add_vid(qdev, vid);
2432 set_bit(vid, qdev->active_vlans);
2434 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2439 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2444 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2445 MAC_ADDR_TYPE_VLAN, vid);
2447 netif_err(qdev, ifup, qdev->ndev,
2448 "Failed to clear vlan address.\n");
2452 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2454 struct ql_adapter *qdev = netdev_priv(ndev);
2458 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2462 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2463 clear_bit(vid, qdev->active_vlans);
2465 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2470 static void qlge_restore_vlan(struct ql_adapter *qdev)
2475 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2479 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2480 __qlge_vlan_rx_add_vid(qdev, vid);
2482 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2485 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2486 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2488 struct rx_ring *rx_ring = dev_id;
2489 napi_schedule(&rx_ring->napi);
2493 /* This handles a fatal error, MPI activity, and the default
2494 * rx_ring in an MSI-X multiple vector environment.
2495 * In an MSI/Legacy environment it also processes the rest of the rx_rings. */
2498 static irqreturn_t qlge_isr(int irq, void *dev_id)
2500 struct rx_ring *rx_ring = dev_id;
2501 struct ql_adapter *qdev = rx_ring->qdev;
2502 struct intr_context *intr_context = &qdev->intr_context[0];
2506 spin_lock(&qdev->hw_lock);
2507 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2508 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2509 "Shared Interrupt, Not ours!\n");
2510 spin_unlock(&qdev->hw_lock);
2513 spin_unlock(&qdev->hw_lock);
2515 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2518 * Check for fatal error.
2521 ql_queue_asic_error(qdev);
2522 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2523 var = ql_read32(qdev, ERR_STS);
2524 netdev_err(qdev->ndev, "Resetting chip. "
2525 "Error Status Register = 0x%x\n", var);
2530 * Check MPI processor activity.
2532 if ((var & STS_PI) &&
2533 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2535 * We've got an async event or mailbox completion.
2536 * Handle it and clear the source of the interrupt.
2538 netif_err(qdev, intr, qdev->ndev,
2539 "Got MPI processor interrupt.\n");
2540 ql_disable_completion_interrupt(qdev, intr_context->intr);
2541 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2542 queue_delayed_work_on(smp_processor_id(),
2543 qdev->workqueue, &qdev->mpi_work, 0);
2548 * Get the bit-mask that shows the active queues for this
2549 * pass. Compare it to the queues that this irq services
2550 * and call napi if there's a match.
2552 var = ql_read32(qdev, ISR1);
2553 if (var & intr_context->irq_mask) {
2554 netif_info(qdev, intr, qdev->ndev,
2555 "Waking handler for rx_ring[0].\n");
2556 ql_disable_completion_interrupt(qdev, intr_context->intr);
2557 napi_schedule(&rx_ring->napi);
2560 ql_enable_completion_interrupt(qdev, intr_context->intr);
2561 return work_done ? IRQ_HANDLED : IRQ_NONE;
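/* Build a TSO IOCB for a GSO skb: record the frame length, header
 * lengths and MSS, and seed the TCP checksum with the pseudo-header
 * value so the hardware can finish the checksum for every segment.
 * Returns 1 when TSO was set up, 0 for non-GSO frames, or a negative
 * errno on failure.
 */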
2564 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2567 if (skb_is_gso(skb)) {
2569 __be16 l3_proto = vlan_get_protocol(skb);
2571 err = skb_cow_head(skb, 0);
2575 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2576 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2577 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2578 mac_iocb_ptr->total_hdrs_len =
2579 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2580 mac_iocb_ptr->net_trans_offset =
2581 cpu_to_le16(skb_network_offset(skb) |
2582 skb_transport_offset(skb)
2583 << OB_MAC_TRANSPORT_HDR_SHIFT);
2584 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2585 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2586 if (likely(l3_proto == htons(ETH_P_IP))) {
2587 struct iphdr *iph = ip_hdr(skb);
2589 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2590 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2594 } else if (l3_proto == htons(ETH_P_IPV6)) {
2595 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2596 tcp_hdr(skb)->check =
2597 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2598 &ipv6_hdr(skb)->daddr,
2606 static void ql_hw_csum_setup(struct sk_buff *skb,
2607 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2610 struct iphdr *iph = ip_hdr(skb);
2612 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2613 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2614 mac_iocb_ptr->net_trans_offset =
2615 cpu_to_le16(skb_network_offset(skb) |
2616 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2618 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2619 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2620 if (likely(iph->protocol == IPPROTO_TCP)) {
2621 check = &(tcp_hdr(skb)->check);
2622 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2623 mac_iocb_ptr->total_hdrs_len =
2624 cpu_to_le16(skb_transport_offset(skb) +
2625 (tcp_hdr(skb)->doff << 2));
2627 check = &(udp_hdr(skb)->check);
2628 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2629 mac_iocb_ptr->total_hdrs_len =
2630 cpu_to_le16(skb_transport_offset(skb) +
2631 sizeof(struct udphdr));
2633 *check = ~csum_tcpudp_magic(iph->saddr,
2634 iph->daddr, len, iph->protocol, 0);
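/* Main transmit entry point.  The skb is padded to the minimum
 * Ethernet length, a MAC IOCB is built (with optional VLAN tag, TSO
 * or checksum offload), the buffers are DMA mapped, and the producer
 * index doorbell is rung.  The subqueue is stopped whenever fewer
 * than two descriptors remain.
 */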
2637 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2639 struct tx_ring_desc *tx_ring_desc;
2640 struct ob_mac_iocb_req *mac_iocb_ptr;
2641 struct ql_adapter *qdev = netdev_priv(ndev);
2643 struct tx_ring *tx_ring;
2644 u32 tx_ring_idx = (u32) skb->queue_mapping;
2646 tx_ring = &qdev->tx_ring[tx_ring_idx];
2648 if (skb_padto(skb, ETH_ZLEN))
2649 return NETDEV_TX_OK;
2651 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2652 netif_info(qdev, tx_queued, qdev->ndev,
2653 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2654 __func__, tx_ring_idx);
2655 netif_stop_subqueue(ndev, tx_ring->wq_id);
2656 tx_ring->tx_errors++;
2657 return NETDEV_TX_BUSY;
2659 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2660 mac_iocb_ptr = tx_ring_desc->queue_entry;
2661 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2663 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2664 mac_iocb_ptr->tid = tx_ring_desc->index;
2665 /* We use the upper 32-bits to store the tx queue for this IO.
2666 * When we get the completion we can use it to establish the context.
2668 mac_iocb_ptr->txq_idx = tx_ring_idx;
2669 tx_ring_desc->skb = skb;
2671 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2673 if (skb_vlan_tag_present(skb)) {
2674 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2675 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2676 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2677 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2679 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2681 dev_kfree_skb_any(skb);
2682 return NETDEV_TX_OK;
2683 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2684 ql_hw_csum_setup(skb,
2685 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2687 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2689 netif_err(qdev, tx_queued, qdev->ndev,
2690 "Could not map the segments.\n");
2691 tx_ring->tx_errors++;
2692 return NETDEV_TX_BUSY;
2694 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2695 tx_ring->prod_idx++;
2696 if (tx_ring->prod_idx == tx_ring->wq_len)
2697 tx_ring->prod_idx = 0;
2700 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2701 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2702 "tx queued, slot %d, len %d\n",
2703 tx_ring->prod_idx, skb->len);
2705 atomic_dec(&tx_ring->tx_count);
2707 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2708 netif_stop_subqueue(ndev, tx_ring->wq_id);
2709 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2711 * The queue got stopped because the tx_ring was full.
2712 * Wake it up, because it's now at least 25% empty.
2714 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2716 return NETDEV_TX_OK;
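/* The shadow register areas are single DMA-coherent pages that the
 * chip updates directly: the RX area holds each ring's producer index
 * shadow plus the lbq/sbq base indirection lists, the TX area holds
 * each ring's consumer index shadow.
 */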
2720 static void ql_free_shadow_space(struct ql_adapter *qdev)
2722 if (qdev->rx_ring_shadow_reg_area) {
2723 pci_free_consistent(qdev->pdev,
2725 qdev->rx_ring_shadow_reg_area,
2726 qdev->rx_ring_shadow_reg_dma);
2727 qdev->rx_ring_shadow_reg_area = NULL;
2729 if (qdev->tx_ring_shadow_reg_area) {
2730 pci_free_consistent(qdev->pdev,
2732 qdev->tx_ring_shadow_reg_area,
2733 qdev->tx_ring_shadow_reg_dma);
2734 qdev->tx_ring_shadow_reg_area = NULL;
2738 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2740 qdev->rx_ring_shadow_reg_area =
2741 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2742 &qdev->rx_ring_shadow_reg_dma);
2743 if (qdev->rx_ring_shadow_reg_area == NULL) {
2744 netif_err(qdev, ifup, qdev->ndev,
2745 "Allocation of RX shadow space failed.\n");
2749 qdev->tx_ring_shadow_reg_area =
2750 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2751 &qdev->tx_ring_shadow_reg_dma);
2752 if (qdev->tx_ring_shadow_reg_area == NULL) {
2753 netif_err(qdev, ifup, qdev->ndev,
2754 "Allocation of TX shadow space failed.\n");
2755 goto err_wqp_sh_area;
2760 pci_free_consistent(qdev->pdev,
2762 qdev->rx_ring_shadow_reg_area,
2763 qdev->rx_ring_shadow_reg_dma);
2767 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2769 struct tx_ring_desc *tx_ring_desc;
2771 struct ob_mac_iocb_req *mac_iocb_ptr;
2773 mac_iocb_ptr = tx_ring->wq_base;
2774 tx_ring_desc = tx_ring->q;
2775 for (i = 0; i < tx_ring->wq_len; i++) {
2776 tx_ring_desc->index = i;
2777 tx_ring_desc->skb = NULL;
2778 tx_ring_desc->queue_entry = mac_iocb_ptr;
2782 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2785 static void ql_free_tx_resources(struct ql_adapter *qdev,
2786 struct tx_ring *tx_ring)
2788 if (tx_ring->wq_base) {
2789 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2790 tx_ring->wq_base, tx_ring->wq_base_dma);
2791 tx_ring->wq_base = NULL;
2797 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2798 struct tx_ring *tx_ring)
2801 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2802 &tx_ring->wq_base_dma);
2804 if ((tx_ring->wq_base == NULL) ||
2805 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2809 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2810 if (tx_ring->q == NULL)
2815 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2816 tx_ring->wq_base, tx_ring->wq_base_dma);
2817 tx_ring->wq_base = NULL;
2819 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2823 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2825 struct bq_desc *lbq_desc;
2827 uint32_t curr_idx, clean_idx;
2829 curr_idx = rx_ring->lbq_curr_idx;
2830 clean_idx = rx_ring->lbq_clean_idx;
2831 while (curr_idx != clean_idx) {
2832 lbq_desc = &rx_ring->lbq[curr_idx];
2834 if (lbq_desc->p.pg_chunk.last_flag) {
2835 pci_unmap_page(qdev->pdev,
2836 lbq_desc->p.pg_chunk.map,
2837 ql_lbq_block_size(qdev),
2838 PCI_DMA_FROMDEVICE);
2839 lbq_desc->p.pg_chunk.last_flag = 0;
2842 put_page(lbq_desc->p.pg_chunk.page);
2843 lbq_desc->p.pg_chunk.page = NULL;
2845 if (++curr_idx == rx_ring->lbq_len)
2849 if (rx_ring->pg_chunk.page) {
2850 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2851 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2852 put_page(rx_ring->pg_chunk.page);
2853 rx_ring->pg_chunk.page = NULL;
2857 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2860 struct bq_desc *sbq_desc;
2862 for (i = 0; i < rx_ring->sbq_len; i++) {
2863 sbq_desc = &rx_ring->sbq[i];
2864 if (sbq_desc == NULL) {
2865 netif_err(qdev, ifup, qdev->ndev,
2866 "sbq_desc %d is NULL.\n", i);
2869 if (sbq_desc->p.skb) {
2870 pci_unmap_single(qdev->pdev,
2871 dma_unmap_addr(sbq_desc, mapaddr),
2872 dma_unmap_len(sbq_desc, maplen),
2873 PCI_DMA_FROMDEVICE);
2874 dev_kfree_skb(sbq_desc->p.skb);
2875 sbq_desc->p.skb = NULL;
2880 /* Free all large and small rx buffers associated
2881 * with the completion queues for this device.
2883 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2886 struct rx_ring *rx_ring;
2888 for (i = 0; i < qdev->rx_ring_count; i++) {
2889 rx_ring = &qdev->rx_ring[i];
2891 ql_free_lbq_buffers(qdev, rx_ring);
2893 ql_free_sbq_buffers(qdev, rx_ring);
2897 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2899 struct rx_ring *rx_ring;
2902 for (i = 0; i < qdev->rx_ring_count; i++) {
2903 rx_ring = &qdev->rx_ring[i];
2904 if (rx_ring->type != TX_Q)
2905 ql_update_buffer_queues(qdev, rx_ring);
2909 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2910 struct rx_ring *rx_ring)
2913 struct bq_desc *lbq_desc;
2914 __le64 *bq = rx_ring->lbq_base;
2916 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2917 for (i = 0; i < rx_ring->lbq_len; i++) {
2918 lbq_desc = &rx_ring->lbq[i];
2919 memset(lbq_desc, 0, sizeof(*lbq_desc));
2920 lbq_desc->index = i;
2921 lbq_desc->addr = bq;
2926 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2927 struct rx_ring *rx_ring)
2930 struct bq_desc *sbq_desc;
2931 __le64 *bq = rx_ring->sbq_base;
2933 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2934 for (i = 0; i < rx_ring->sbq_len; i++) {
2935 sbq_desc = &rx_ring->sbq[i];
2936 memset(sbq_desc, 0, sizeof(*sbq_desc));
2937 sbq_desc->index = i;
2938 sbq_desc->addr = bq;
2943 static void ql_free_rx_resources(struct ql_adapter *qdev,
2944 struct rx_ring *rx_ring)
2946 /* Free the small buffer queue. */
2947 if (rx_ring->sbq_base) {
2948 pci_free_consistent(qdev->pdev,
2950 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2951 rx_ring->sbq_base = NULL;
2954 /* Free the small buffer queue control blocks. */
2955 kfree(rx_ring->sbq);
2956 rx_ring->sbq = NULL;
2958 /* Free the large buffer queue. */
2959 if (rx_ring->lbq_base) {
2960 pci_free_consistent(qdev->pdev,
2962 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2963 rx_ring->lbq_base = NULL;
2966 /* Free the large buffer queue control blocks. */
2967 kfree(rx_ring->lbq);
2968 rx_ring->lbq = NULL;
2970 /* Free the rx queue. */
2971 if (rx_ring->cq_base) {
2972 pci_free_consistent(qdev->pdev,
2974 rx_ring->cq_base, rx_ring->cq_base_dma);
2975 rx_ring->cq_base = NULL;
2979 /* Allocate queues and buffers for this completions queue based
2980 * on the values in the parameter structure. */
2981 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2982 struct rx_ring *rx_ring)
2986 * Allocate the completion queue for this rx_ring.
2989 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2990 &rx_ring->cq_base_dma);
2992 if (rx_ring->cq_base == NULL) {
2993 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2997 if (rx_ring->sbq_len) {
2999 * Allocate small buffer queue.
3002 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3003 &rx_ring->sbq_base_dma);
3005 if (rx_ring->sbq_base == NULL) {
3006 netif_err(qdev, ifup, qdev->ndev,
3007 "Small buffer queue allocation failed.\n");
3012 * Allocate small buffer queue control blocks.
3014 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3015 sizeof(struct bq_desc),
3017 if (rx_ring->sbq == NULL)
3020 ql_init_sbq_ring(qdev, rx_ring);
3023 if (rx_ring->lbq_len) {
3025 * Allocate large buffer queue.
3028 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3029 &rx_ring->lbq_base_dma);
3031 if (rx_ring->lbq_base == NULL) {
3032 netif_err(qdev, ifup, qdev->ndev,
3033 "Large buffer queue allocation failed.\n");
3037 * Allocate large buffer queue control blocks.
3039 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3040 sizeof(struct bq_desc),
3042 if (rx_ring->lbq == NULL)
3045 ql_init_lbq_ring(qdev, rx_ring);
3051 ql_free_rx_resources(qdev, rx_ring);
3055 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3057 struct tx_ring *tx_ring;
3058 struct tx_ring_desc *tx_ring_desc;
3062 * Loop through all queues and free
3065 for (j = 0; j < qdev->tx_ring_count; j++) {
3066 tx_ring = &qdev->tx_ring[j];
3067 for (i = 0; i < tx_ring->wq_len; i++) {
3068 tx_ring_desc = &tx_ring->q[i];
3069 if (tx_ring_desc && tx_ring_desc->skb) {
3070 netif_err(qdev, ifdown, qdev->ndev,
3071 "Freeing lost SKB %p, from queue %d, index %d.\n",
3072 tx_ring_desc->skb, j,
3073 tx_ring_desc->index);
3074 ql_unmap_send(qdev, tx_ring_desc,
3075 tx_ring_desc->map_cnt);
3076 dev_kfree_skb(tx_ring_desc->skb);
3077 tx_ring_desc->skb = NULL;
3083 static void ql_free_mem_resources(struct ql_adapter *qdev)
3087 for (i = 0; i < qdev->tx_ring_count; i++)
3088 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3089 for (i = 0; i < qdev->rx_ring_count; i++)
3090 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3091 ql_free_shadow_space(qdev);
3094 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3098 /* Allocate space for our shadow registers and such. */
3099 if (ql_alloc_shadow_space(qdev))
3102 for (i = 0; i < qdev->rx_ring_count; i++) {
3103 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3104 netif_err(qdev, ifup, qdev->ndev,
3105 "RX resource allocation failed.\n");
3109 /* Allocate tx queue resources */
3110 for (i = 0; i < qdev->tx_ring_count; i++) {
3111 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3112 netif_err(qdev, ifup, qdev->ndev,
3113 "TX resource allocation failed.\n");
3120 ql_free_mem_resources(qdev);
3124 /* Set up the rx ring control block and pass it to the chip.
3125 * The control block is defined as
3126 * "Completion Queue Initialization Control Block", or cqicb.
3128 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3130 struct cqicb *cqicb = &rx_ring->cqicb;
3131 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3132 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3133 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3134 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3135 void __iomem *doorbell_area =
3136 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3140 __le64 *base_indirect_ptr;
3143 /* Set up the shadow registers for this ring. */
3144 rx_ring->prod_idx_sh_reg = shadow_reg;
3145 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3146 *rx_ring->prod_idx_sh_reg = 0;
3147 shadow_reg += sizeof(u64);
3148 shadow_reg_dma += sizeof(u64);
3149 rx_ring->lbq_base_indirect = shadow_reg;
3150 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3151 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3152 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3153 rx_ring->sbq_base_indirect = shadow_reg;
3154 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3156 /* PCI doorbell mem area + 0x00 for consumer index register */
3157 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3158 rx_ring->cnsmr_idx = 0;
3159 rx_ring->curr_entry = rx_ring->cq_base;
3161 /* PCI doorbell mem area + 0x04 for valid register */
3162 rx_ring->valid_db_reg = doorbell_area + 0x04;
3164 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3165 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3167 /* PCI doorbell mem area + 0x1c */
3168 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3170 memset((void *)cqicb, 0, sizeof(struct cqicb));
3171 cqicb->msix_vect = rx_ring->irq;
3173 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3174 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3176 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3178 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3181 * Set up the control block load flags.
3183 cqicb->flags = FLAGS_LC | /* Load queue base address */
3184 FLAGS_LV | /* Load MSI-X vector */
3185 FLAGS_LI; /* Load irq delay values */
3186 if (rx_ring->lbq_len) {
3187 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3188 tmp = (u64)rx_ring->lbq_base_dma;
3189 base_indirect_ptr = rx_ring->lbq_base_indirect;
3192 *base_indirect_ptr = cpu_to_le64(tmp);
3193 tmp += DB_PAGE_SIZE;
3194 base_indirect_ptr++;
3196 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3198 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3199 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3200 (u16) rx_ring->lbq_buf_size;
3201 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3202 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3203 (u16) rx_ring->lbq_len;
3204 cqicb->lbq_len = cpu_to_le16(bq_len);
3205 rx_ring->lbq_prod_idx = 0;
3206 rx_ring->lbq_curr_idx = 0;
3207 rx_ring->lbq_clean_idx = 0;
3208 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3210 if (rx_ring->sbq_len) {
3211 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3212 tmp = (u64)rx_ring->sbq_base_dma;
3213 base_indirect_ptr = rx_ring->sbq_base_indirect;
3216 *base_indirect_ptr = cpu_to_le64(tmp);
3217 tmp += DB_PAGE_SIZE;
3218 base_indirect_ptr++;
3220 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3222 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3223 cqicb->sbq_buf_size =
3224 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3225 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3226 (u16) rx_ring->sbq_len;
3227 cqicb->sbq_len = cpu_to_le16(bq_len);
3228 rx_ring->sbq_prod_idx = 0;
3229 rx_ring->sbq_curr_idx = 0;
3230 rx_ring->sbq_clean_idx = 0;
3231 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3233 switch (rx_ring->type) {
3235 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3236 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3239 /* Inbound completion handling rx_rings run in
3240 * separate NAPI contexts.
3242 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3244 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3245 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3248 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3249 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3251 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3252 CFG_LCQ, rx_ring->cq_id);
3254 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
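/* Set up one TX work queue: assign its doorbell and shadow registers,
 * fill in the work queue initialization control block (wqicb), and
 * download it to the chip with ql_write_cfg().
 */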
3260 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3262 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3263 void __iomem *doorbell_area =
3264 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3265 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3266 (tx_ring->wq_id * sizeof(u64));
3267 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3268 (tx_ring->wq_id * sizeof(u64));
3272 * Assign doorbell registers for this tx_ring.
3274 /* TX PCI doorbell mem area for tx producer index */
3275 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3276 tx_ring->prod_idx = 0;
3277 /* TX PCI doorbell mem area + 0x04 */
3278 tx_ring->valid_db_reg = doorbell_area + 0x04;
3281 * Assign shadow registers for this tx_ring.
3283 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3284 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3286 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3287 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3288 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3289 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3291 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3293 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3295 ql_init_tx_ring(qdev, tx_ring);
3297 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3298 (u16) tx_ring->wq_id);
3300 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3306 static void ql_disable_msix(struct ql_adapter *qdev)
3308 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3309 pci_disable_msix(qdev->pdev);
3310 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3311 kfree(qdev->msi_x_entry);
3312 qdev->msi_x_entry = NULL;
3313 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3314 pci_disable_msi(qdev->pdev);
3315 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3319 /* We start by trying to get the number of vectors
3320 * stored in qdev->intr_count. If we don't get that
3321 * many then we reduce the count and try again.
3323 static void ql_enable_msix(struct ql_adapter *qdev)
3327 /* Get the MSIX vectors. */
3328 if (qlge_irq_type == MSIX_IRQ) {
3329 /* Try to alloc space for the msix struct,
3330 * if it fails then go to MSI/legacy.
3332 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3333 sizeof(struct msix_entry),
3335 if (!qdev->msi_x_entry) {
3336 qlge_irq_type = MSI_IRQ;
3340 for (i = 0; i < qdev->intr_count; i++)
3341 qdev->msi_x_entry[i].entry = i;
3343 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3344 1, qdev->intr_count);
3346 kfree(qdev->msi_x_entry);
3347 qdev->msi_x_entry = NULL;
3348 netif_warn(qdev, ifup, qdev->ndev,
3349 "MSI-X Enable failed, trying MSI.\n");
3350 qlge_irq_type = MSI_IRQ;
3352 qdev->intr_count = err;
3353 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3354 netif_info(qdev, ifup, qdev->ndev,
3355 "MSI-X Enabled, got %d vectors.\n",
3361 qdev->intr_count = 1;
3362 if (qlge_irq_type == MSI_IRQ) {
3363 if (!pci_enable_msi(qdev->pdev)) {
3364 set_bit(QL_MSI_ENABLED, &qdev->flags);
3365 netif_info(qdev, ifup, qdev->ndev,
3366 "Running with MSI interrupts.\n");
3370 qlge_irq_type = LEG_IRQ;
3371 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3372 "Running with legacy interrupts.\n");
3375 /* Each vector services 1 RSS ring and 1 or more
3376 * TX completion rings. This function loops through
3377 * the TX completion rings and assigns the vector that
3378 * will service it. An example would be if there are
3379 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3380 * This would mean that vector 0 would service RSS ring 0
3381 * and TX completion rings 0,1,2 and 3. Vector 1 would
3382 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3384 static void ql_set_tx_vect(struct ql_adapter *qdev)
3387 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3389 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3390 /* Assign irq vectors to TX rx_rings.*/
3391 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3392 i < qdev->rx_ring_count; i++) {
3393 if (j == tx_rings_per_vector) {
3397 qdev->rx_ring[i].irq = vect;
3401 /* For a single vector, all rings have an irq of zero. */
3404 for (i = 0; i < qdev->rx_ring_count; i++)
3405 qdev->rx_ring[i].irq = 0;
3409 /* Set the interrupt mask for this vector. Each vector
3410 * will service 1 RSS ring and 1 or more TX completion
3411 * rings. This function sets up a bit mask per vector
3412 * that indicates which rings it services.
3414 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3416 int j, vect = ctx->intr;
3417 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3419 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3420 /* Add the RSS ring serviced by this vector
3423 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3424 /* Add the TX ring(s) serviced by this vector
3426 for (j = 0; j < tx_rings_per_vector; j++) {
3428 (1 << qdev->rx_ring[qdev->rss_ring_count +
3429 (vect * tx_rings_per_vector) + j].cq_id);
3432 /* For a single vector we simply OR each queue's bit into the one irq_mask. */
3435 for (j = 0; j < qdev->rx_ring_count; j++)
3436 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3441 * Here we build the intr_context structures based on
3442 * our rx_ring count and intr vector count.
3443 * The intr_context structure is used to hook each vector
3444 * to possibly different handlers.
3446 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3449 struct intr_context *intr_context = &qdev->intr_context[0];
3451 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3452 /* Each rx_ring has its
3453 * own intr_context since we have separate
3454 * vectors for each queue.
3456 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3457 qdev->rx_ring[i].irq = i;
3458 intr_context->intr = i;
3459 intr_context->qdev = qdev;
3460 /* Set up this vector's bit-mask that indicates
3461 * which queues it services.
3463 ql_set_irq_mask(qdev, intr_context);
3465 * We set up each vector's enable/disable/read bits so
3466 * there are no bit/mask calculations in the critical path.
3468 intr_context->intr_en_mask =
3469 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3470 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3472 intr_context->intr_dis_mask =
3473 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3476 intr_context->intr_read_mask =
3477 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3478 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3481 /* The first vector/queue handles
3482 * broadcast/multicast, fatal errors,
3483 * and firmware events. This in addition
3484 * to normal inbound NAPI processing.
3486 intr_context->handler = qlge_isr;
3487 sprintf(intr_context->name, "%s-rx-%d",
3488 qdev->ndev->name, i);
3491 * Inbound queues handle unicast frames only.
3493 intr_context->handler = qlge_msix_rx_isr;
3494 sprintf(intr_context->name, "%s-rx-%d",
3495 qdev->ndev->name, i);
3500 * All rx_rings use the same intr_context since
3501 * there is only one vector.
3503 intr_context->intr = 0;
3504 intr_context->qdev = qdev;
3506 * We set up each vector's enable/disable/read bits so
3507 * there are no bit/mask calculations in the critical path.
3509 intr_context->intr_en_mask =
3510 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3511 intr_context->intr_dis_mask =
3512 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3513 INTR_EN_TYPE_DISABLE;
3514 intr_context->intr_read_mask =
3515 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3517 * Single interrupt means one handler for all rings.
3519 intr_context->handler = qlge_isr;
3520 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3521 /* Set up this vector's bit-mask that indicates
3522 * which queues it services. In this case there is
3523 * a single vector so it will service all RSS and
3524 * TX completion rings.
3526 ql_set_irq_mask(qdev, intr_context);
3528 /* Tell the TX completion rings which MSIx vector
3529 * they will be using.
3531 ql_set_tx_vect(qdev);
3534 static void ql_free_irq(struct ql_adapter *qdev)
3537 struct intr_context *intr_context = &qdev->intr_context[0];
3539 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3540 if (intr_context->hooked) {
3541 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3542 free_irq(qdev->msi_x_entry[i].vector,
3545 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3549 ql_disable_msix(qdev);
3552 static int ql_request_irq(struct ql_adapter *qdev)
3556 struct pci_dev *pdev = qdev->pdev;
3557 struct intr_context *intr_context = &qdev->intr_context[0];
3559 ql_resolve_queues_to_irqs(qdev);
3561 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3562 atomic_set(&intr_context->irq_cnt, 0);
3563 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3564 status = request_irq(qdev->msi_x_entry[i].vector,
3565 intr_context->handler,
3570 netif_err(qdev, ifup, qdev->ndev,
3571 "Failed request for MSIX interrupt %d.\n",
3576 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3577 "trying msi or legacy interrupts.\n");
3578 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3579 "%s: irq = %d.\n", __func__, pdev->irq);
3580 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3581 "%s: context->name = %s.\n", __func__,
3582 intr_context->name);
3583 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3584 "%s: dev_id = 0x%p.\n", __func__,
3587 request_irq(pdev->irq, qlge_isr,
3588 test_bit(QL_MSI_ENABLED,
3590 flags) ? 0 : IRQF_SHARED,
3591 intr_context->name, &qdev->rx_ring[0]);
3595 netif_err(qdev, ifup, qdev->ndev,
3596 "Hooked intr %d, queue type %s, with name %s.\n",
3598 qdev->rx_ring[0].type == DEFAULT_Q ?
3600 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3601 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3602 intr_context->name);
3604 intr_context->hooked = 1;
3608 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
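/* Load the RSS initialization control block (ricb): a fixed 40-byte
 * hash seed and a 1024-entry indirection table built by masking the
 * entry index with (rss_ring_count - 1), spreading flows across the
 * inbound RSS rings.
 */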
3613 static int ql_start_rss(struct ql_adapter *qdev)
3615 static const u8 init_hash_seed[] = {
3616 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3617 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3618 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3619 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3620 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3622 struct ricb *ricb = &qdev->ricb;
3625 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3627 memset((void *)ricb, 0, sizeof(*ricb));
3629 ricb->base_cq = RSS_L4K;
3631 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3632 ricb->mask = cpu_to_le16((u16)(0x3ff));
3635 * Fill out the Indirection Table.
3637 for (i = 0; i < 1024; i++)
3638 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3640 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3641 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3643 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3645 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3651 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3655 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3658 /* Clear all the entries in the routing table. */
3659 for (i = 0; i < 16; i++) {
3660 status = ql_set_routing_reg(qdev, i, 0, 0);
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register for CAM packets.\n");
3667 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3671 /* Initialize the frame-to-queue routing. */
3672 static int ql_route_initialize(struct ql_adapter *qdev)
3676 /* Clear all the entries in the routing table. */
3677 status = ql_clear_routing_entries(qdev);
3681 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3685 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3686 RT_IDX_IP_CSUM_ERR, 1);
3688 netif_err(qdev, ifup, qdev->ndev,
3689 "Failed to init routing register "
3690 "for IP CSUM error packets.\n");
3693 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3694 RT_IDX_TU_CSUM_ERR, 1);
3696 netif_err(qdev, ifup, qdev->ndev,
3697 "Failed to init routing register "
3698 "for TCP/UDP CSUM error packets.\n");
3701 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3703 netif_err(qdev, ifup, qdev->ndev,
3704 "Failed to init routing register for broadcast packets.\n");
3707 /* If we have more than one inbound queue, then turn on RSS in the chip. */
3710 if (qdev->rss_ring_count > 1) {
3711 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3712 RT_IDX_RSS_MATCH, 1);
3714 netif_err(qdev, ifup, qdev->ndev,
3715 "Failed to init routing register for MATCH RSS packets.\n");
3720 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3723 netif_err(qdev, ifup, qdev->ndev,
3724 "Failed to init routing register for CAM packets.\n");
3726 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3730 int ql_cam_route_initialize(struct ql_adapter *qdev)
3734 /* Check if the link is up and use that to
3735 * determine if we are setting or clearing
3736 * the MAC address in the CAM.
3738 set = ql_read32(qdev, STS);
3739 set &= qdev->port_link_up;
3740 status = ql_set_mac_addr(qdev, set);
3742 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3746 status = ql_route_initialize(qdev);
3748 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
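/* One-time hardware bring-up: program error handling, default queue
 * and VLAN behavior, enable the MPI interrupt, start every rx and tx
 * ring, load RSS when more than one inbound queue exists, initialize
 * the port, set up CAM/routing, and finally enable NAPI on the RSS
 * rings.
 */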
3753 static int ql_adapter_initialize(struct ql_adapter *qdev)
3760 * Set up the System register to halt on errors.
3762 value = SYS_EFE | SYS_FAE;
3764 ql_write32(qdev, SYS, mask | value);
3766 /* Set the default queue, and VLAN behavior. */
3767 value = NIC_RCV_CFG_DFQ;
3768 mask = NIC_RCV_CFG_DFQ_MASK;
3769 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3770 value |= NIC_RCV_CFG_RV;
3771 mask |= (NIC_RCV_CFG_RV << 16);
3773 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3775 /* Set the MPI interrupt to enabled. */
3776 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3778 /* Enable the function, set pagesize, enable error checking. */
3779 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3780 FSC_EC | FSC_VM_PAGE_4K;
3781 value |= SPLT_SETTING;
3783 /* Set/clear header splitting. */
3784 mask = FSC_VM_PAGESIZE_MASK |
3785 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3786 ql_write32(qdev, FSC, mask | value);
3788 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3790 /* Set RX packet routing to use port/pci function on which the
3791 * packet arrived, in addition to the usual frame routing.
3792 * This is helpful on bonding where both interfaces can have
3793 * the same MAC address.
3795 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3796 /* Reroute all packets to our Interface.
3797 * They may have been routed to MPI firmware
3800 value = ql_read32(qdev, MGMT_RCV_CFG);
3801 value &= ~MGMT_RCV_CFG_RM;
3804 /* Sticky reg needs clearing due to WOL. */
3805 ql_write32(qdev, MGMT_RCV_CFG, mask);
3806 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3808 /* Default WOL is enabled on Mezz cards */
3809 if (qdev->pdev->subsystem_device == 0x0068 ||
3810 qdev->pdev->subsystem_device == 0x0180)
3811 qdev->wol = WAKE_MAGIC;
3813 /* Start up the rx queues. */
3814 for (i = 0; i < qdev->rx_ring_count; i++) {
3815 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3817 netif_err(qdev, ifup, qdev->ndev,
3818 "Failed to start rx ring[%d].\n", i);
3823 /* If there is more than one inbound completion queue
3824 * then download a RICB to configure RSS.
3826 if (qdev->rss_ring_count > 1) {
3827 status = ql_start_rss(qdev);
3829 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3834 /* Start up the tx queues. */
3835 for (i = 0; i < qdev->tx_ring_count; i++) {
3836 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3838 netif_err(qdev, ifup, qdev->ndev,
3839 "Failed to start tx ring[%d].\n", i);
3844 /* Initialize the port and set the max framesize. */
3845 status = qdev->nic_ops->port_initialize(qdev);
3847 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3849 /* Set up the MAC address and frame routing filter. */
3850 status = ql_cam_route_initialize(qdev);
3852 netif_err(qdev, ifup, qdev->ndev,
3853 "Failed to init CAM/Routing tables.\n");
3857 /* Start NAPI for the RSS queues. */
3858 for (i = 0; i < qdev->rss_ring_count; i++)
3859 napi_enable(&qdev->rx_ring[i].napi);
3864 /* Issue soft reset to chip. */
3865 static int ql_adapter_reset(struct ql_adapter *qdev)
3869 unsigned long end_jiffies;
3871 /* Clear all the entries in the routing table. */
3872 status = ql_clear_routing_entries(qdev);
3874 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3878 /* If the recovery bit is set, skip the mailbox command and just
3879  * clear the bit; otherwise we are in the normal reset process. */
3881 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3882 /* Stop management traffic. */
3883 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3885 /* Wait for the NIC and MGMNT FIFOs to empty. */
3886 ql_wait_fifo_empty(qdev);
3888 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3890 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3892 end_jiffies = jiffies + usecs_to_jiffies(30);
3894 value = ql_read32(qdev, RST_FO);
3895 if ((value & RST_FO_FR) == 0)
3898 } while (time_before(jiffies, end_jiffies));
3900 if (value & RST_FO_FR) {
3901 netif_err(qdev, ifdown, qdev->ndev,
3902 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3903 status = -ETIMEDOUT;
3906 /* Resume management traffic. */
3907 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3911 static void ql_display_dev_info(struct net_device *ndev)
3913 struct ql_adapter *qdev = netdev_priv(ndev);
3915 netif_info(qdev, probe, qdev->ndev,
3916 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3917 "XG Roll = %d, XG Rev = %d.\n",
3920 qdev->chip_rev_id & 0x0000000f,
3921 qdev->chip_rev_id >> 4 & 0x0000000f,
3922 qdev->chip_rev_id >> 8 & 0x0000000f,
3923 qdev->chip_rev_id >> 12 & 0x0000000f);
3924 netif_info(qdev, probe, qdev->ndev,
3925 "MAC address %pM\n", ndev->dev_addr);
3928 static int ql_wol(struct ql_adapter *qdev)
3931 u32 wol = MB_WOL_DISABLE;
3933 /* The CAM is still intact after a reset, but if we
3934 * are doing WOL, then we may need to program the
3935 * routing regs. We would also need to issue the mailbox
3936 * commands to instruct the MPI what to do per the ethtool settings. */
3940 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3941 WAKE_MCAST | WAKE_BCAST)) {
3942 netif_err(qdev, ifdown, qdev->ndev,
3943 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3948 if (qdev->wol & WAKE_MAGIC) {
3949 status = ql_mb_wol_set_magic(qdev, 1);
3951 netif_err(qdev, ifdown, qdev->ndev,
3952 "Failed to set magic packet on %s.\n",
3956 netif_info(qdev, drv, qdev->ndev,
3957 "Enabled magic packet successfully on %s.\n",
3960 wol |= MB_WOL_MAGIC_PKT;
3964 wol |= MB_WOL_MODE_ON;
3965 status = ql_mb_wol_mode(qdev, wol);
3966 netif_err(qdev, drv, qdev->ndev,
3967 "WOL %s (wol code 0x%x) on %s\n",
3968 (status == 0) ? "Successfully set" : "Failed",
3969 wol, qdev->ndev->name);
3975 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3978 /* Don't kill the reset worker thread if we
3979 * are in the process of recovery.
3981 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3982 cancel_delayed_work_sync(&qdev->asic_reset_work);
3983 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3984 cancel_delayed_work_sync(&qdev->mpi_work);
3985 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3986 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3987 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3990 static int ql_adapter_down(struct ql_adapter *qdev)
3996 ql_cancel_all_work_sync(qdev);
3998 for (i = 0; i < qdev->rss_ring_count; i++)
3999 napi_disable(&qdev->rx_ring[i].napi);
4001 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4003 ql_disable_interrupts(qdev);
4005 ql_tx_ring_clean(qdev);
4007 /* Call netif_napi_del() from common point.
4009 for (i = 0; i < qdev->rss_ring_count; i++)
4010 netif_napi_del(&qdev->rx_ring[i].napi);
4012 status = ql_adapter_reset(qdev);
4014 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4016 ql_free_rx_buffers(qdev);
4021 static int ql_adapter_up(struct ql_adapter *qdev)
4025 err = ql_adapter_initialize(qdev);
4027 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4030 set_bit(QL_ADAPTER_UP, &qdev->flags);
4031 ql_alloc_rx_buffers(qdev);
4032 /* If the port is initialized and the
4033 * link is up, then turn on the carrier. */
4035 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4036 (ql_read32(qdev, STS) & qdev->port_link_up))
4038 /* Restore rx mode. */
4039 clear_bit(QL_ALLMULTI, &qdev->flags);
4040 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4041 qlge_set_multicast_list(qdev->ndev);
4043 /* Restore vlan setting. */
4044 qlge_restore_vlan(qdev);
4046 ql_enable_interrupts(qdev);
4047 ql_enable_all_completion_interrupts(qdev);
4048 netif_tx_start_all_queues(qdev->ndev);
4052 ql_adapter_reset(qdev);
4056 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4058 ql_free_mem_resources(qdev);
4062 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4066 if (ql_alloc_mem_resources(qdev)) {
4067 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4070 status = ql_request_irq(qdev);
4074 static int qlge_close(struct net_device *ndev)
4076 struct ql_adapter *qdev = netdev_priv(ndev);
4078 /* If we hit pci_channel_io_perm_failure
4079 * failure condition, then we already
4080 * brought the adapter down.
4082 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4083 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4084 clear_bit(QL_EEH_FATAL, &qdev->flags);
4089 * Wait for device to recover from a reset.
4090 * (Rarely happens, but possible.)
4092 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4094 ql_adapter_down(qdev);
4095 ql_release_adapter_resources(qdev);
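/* Work out the ring topology for this adapter.  One RSS (inbound)
 * ring is created per MSI-X vector and one TX ring (plus its outbound
 * completion ring) per CPU, so rx_ring_count is the sum of the two.
 * Outbound completion queues are numbered right after the RSS queues.
 */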
4099 static int ql_configure_rings(struct ql_adapter *qdev)
4102 struct rx_ring *rx_ring;
4103 struct tx_ring *tx_ring;
4104 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4105 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4106 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4108 qdev->lbq_buf_order = get_order(lbq_buf_len);
4110 /* In a perfect world we have one RSS ring for each CPU
4111 * and each has its own vector. To do that we ask for
4112 * cpu_cnt vectors. ql_enable_msix() will adjust the
4113 * vector count to what we actually get. We then
4114 * allocate an RSS ring for each.
4115 * Essentially, we are doing min(cpu_count, msix_vector_count).
4117 qdev->intr_count = cpu_cnt;
4118 ql_enable_msix(qdev);
4119 /* Adjust the RSS ring count to the actual vector count. */
4120 qdev->rss_ring_count = qdev->intr_count;
4121 qdev->tx_ring_count = cpu_cnt;
4122 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4124 for (i = 0; i < qdev->tx_ring_count; i++) {
4125 tx_ring = &qdev->tx_ring[i];
4126 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4127 tx_ring->qdev = qdev;
4129 tx_ring->wq_len = qdev->tx_ring_size;
4131 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4134 * The completion queue IDs for the tx rings start
4135 * immediately after the rss rings.
4137 tx_ring->cq_id = qdev->rss_ring_count + i;
4140 for (i = 0; i < qdev->rx_ring_count; i++) {
4141 rx_ring = &qdev->rx_ring[i];
4142 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4143 rx_ring->qdev = qdev;
4145 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4146 if (i < qdev->rss_ring_count) {
4148 * Inbound (RSS) queues.
4150 rx_ring->cq_len = qdev->rx_ring_size;
4152 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4153 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4155 rx_ring->lbq_len * sizeof(__le64);
4156 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4157 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4159 rx_ring->sbq_len * sizeof(__le64);
4160 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4161 rx_ring->type = RX_Q;
4164 * Outbound queue handles outbound completions only.
4166 /* outbound cq is same size as tx_ring it services. */
4167 rx_ring->cq_len = qdev->tx_ring_size;
4169 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4170 rx_ring->lbq_len = 0;
4171 rx_ring->lbq_size = 0;
4172 rx_ring->lbq_buf_size = 0;
4173 rx_ring->sbq_len = 0;
4174 rx_ring->sbq_size = 0;
4175 rx_ring->sbq_buf_size = 0;
4176 rx_ring->type = TX_Q;
4182 static int qlge_open(struct net_device *ndev)
4185 struct ql_adapter *qdev = netdev_priv(ndev);
4187 err = ql_adapter_reset(qdev);
4191 err = ql_configure_rings(qdev);
4195 err = ql_get_adapter_resources(qdev);
4199 err = ql_adapter_up(qdev);
4206 ql_release_adapter_resources(qdev);
4210 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4212 struct rx_ring *rx_ring;
4216 /* Wait for an outstanding reset to complete. */
4217 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4220 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4221 netif_err(qdev, ifup, qdev->ndev,
4222 "Waiting for adapter UP...\n");
4227 netif_err(qdev, ifup, qdev->ndev,
4228 "Timed out waiting for adapter UP\n");
4233 status = ql_adapter_down(qdev);
4237 /* Get the new rx buffer size. */
4238 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4239 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4240 qdev->lbq_buf_order = get_order(lbq_buf_len);
4242 for (i = 0; i < qdev->rss_ring_count; i++) {
4243 rx_ring = &qdev->rx_ring[i];
4244 /* Set the new size. */
4245 rx_ring->lbq_buf_size = lbq_buf_len;
4248 status = ql_adapter_up(qdev);
4254 netif_alert(qdev, ifup, qdev->ndev,
4255 "Driver up/down cycle failed, closing device.\n");
4256 set_bit(QL_ADAPTER_UP, &qdev->flags);
4257 dev_close(qdev->ndev);
4261 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4263 struct ql_adapter *qdev = netdev_priv(ndev);
4266 if (ndev->mtu == 1500 && new_mtu == 9000) {
4267 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4268 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4269 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4273 queue_delayed_work(qdev->workqueue,
4274 &qdev->mpi_port_cfg_work, 3*HZ);
4276 ndev->mtu = new_mtu;
4278 if (!netif_running(qdev->ndev)) {
4282 status = ql_change_rx_buffers(qdev);
4284 netif_err(qdev, ifup, qdev->ndev,
4285 "Changing MTU failed.\n");
4291 static struct net_device_stats *qlge_get_stats(struct net_device
4294 struct ql_adapter *qdev = netdev_priv(ndev);
4295 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4296 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4297 unsigned long pkts, mcast, dropped, errors, bytes;
4301 pkts = mcast = dropped = errors = bytes = 0;
4302 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4303 pkts += rx_ring->rx_packets;
4304 bytes += rx_ring->rx_bytes;
4305 dropped += rx_ring->rx_dropped;
4306 errors += rx_ring->rx_errors;
4307 mcast += rx_ring->rx_multicast;
4309 ndev->stats.rx_packets = pkts;
4310 ndev->stats.rx_bytes = bytes;
4311 ndev->stats.rx_dropped = dropped;
4312 ndev->stats.rx_errors = errors;
4313 ndev->stats.multicast = mcast;
4316 pkts = errors = bytes = 0;
4317 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4318 pkts += tx_ring->tx_packets;
4319 bytes += tx_ring->tx_bytes;
4320 errors += tx_ring->tx_errors;
4322 ndev->stats.tx_packets = pkts;
4323 ndev->stats.tx_bytes = bytes;
4324 ndev->stats.tx_errors = errors;
4325 return &ndev->stats;
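/* Apply the rx filter mode: toggle the promiscuous and all-multicast
 * routing entries only on a state transition, and load up to
 * MAX_MULTICAST_ENTRIES addresses into the multicast CAM before
 * enabling multicast-match routing.
 */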
4328 static void qlge_set_multicast_list(struct net_device *ndev)
4330 struct ql_adapter *qdev = netdev_priv(ndev);
4331 struct netdev_hw_addr *ha;
4334 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4338 * Set or clear promiscuous mode if a
4339 * transition is taking place.
4341 if (ndev->flags & IFF_PROMISC) {
4342 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4343 if (ql_set_routing_reg
4344 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4345 netif_err(qdev, hw, qdev->ndev,
4346 "Failed to set promiscuous mode.\n");
4348 set_bit(QL_PROMISCUOUS, &qdev->flags);
4352 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4353 if (ql_set_routing_reg
4354 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4355 netif_err(qdev, hw, qdev->ndev,
4356 "Failed to clear promiscuous mode.\n");
4358 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4364 * Set or clear all multicast mode if a
4365 * transition is taking place.
4367 if ((ndev->flags & IFF_ALLMULTI) ||
4368 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4369 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4370 if (ql_set_routing_reg
4371 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4372 netif_err(qdev, hw, qdev->ndev,
4373 "Failed to set all-multi mode.\n");
4375 set_bit(QL_ALLMULTI, &qdev->flags);
4379 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4380 if (ql_set_routing_reg
4381 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4382 netif_err(qdev, hw, qdev->ndev,
4383 "Failed to clear all-multi mode.\n");
4385 clear_bit(QL_ALLMULTI, &qdev->flags);
4390 if (!netdev_mc_empty(ndev)) {
4391 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4395 netdev_for_each_mc_addr(ha, ndev) {
4396 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4397 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4398 netif_err(qdev, hw, qdev->ndev,
4399 "Failed to loadmulticast address.\n");
4400 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4405 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4406 if (ql_set_routing_reg
4407 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4408 netif_err(qdev, hw, qdev->ndev,
4409 "Failed to set multicast match mode.\n");
4411 set_bit(QL_ALLMULTI, &qdev->flags);
4415 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4418 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4420 struct ql_adapter *qdev = netdev_priv(ndev);
4421 struct sockaddr *addr = p;
4424 if (!is_valid_ether_addr(addr->sa_data))
4425 return -EADDRNOTAVAIL;
4426 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4427 /* Update local copy of current mac address. */
4428 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4430 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4433 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4434 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4436 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4437 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4441 static void qlge_tx_timeout(struct net_device *ndev)
4443 struct ql_adapter *qdev = netdev_priv(ndev);
4444 ql_queue_asic_error(qdev);
4447 static void ql_asic_reset_work(struct work_struct *work)
4449 struct ql_adapter *qdev =
4450 container_of(work, struct ql_adapter, asic_reset_work.work);
4453 status = ql_adapter_down(qdev);
4457 status = ql_adapter_up(qdev);
4461 /* Restore rx mode. */
4462 clear_bit(QL_ALLMULTI, &qdev->flags);
4463 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4464 qlge_set_multicast_list(qdev->ndev);
4469 netif_alert(qdev, ifup, qdev->ndev,
4470 "Driver up/down cycle failed, closing device\n");
4472 set_bit(QL_ADAPTER_UP, &qdev->flags);
4473 dev_close(qdev->ndev);
static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
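
/* Derive the per-function operating constants (port number, XGMAC semaphore
 * mask, link/init status bits and MPI mailbox addresses) from the STS
 * register, and pick the nic_operations table that matches the device ID.
 */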
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
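
/* Tear down everything ql_init_device() set up: the ordered workqueue, both
 * register mappings, the optional MPI coredump buffer and the PCI regions.
 */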
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}
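
/* One-time PCI and software setup shared by the probe path: enable the
 * device, map the register and doorbell regions, choose a 64- or 32-bit DMA
 * mask, read the board configuration, and initialize the workqueue, delayed
 * work items, locks and default ring/coalescing parameters.
 */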
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
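
/* Deferrable watchdog timer.  Reading STS every five seconds gives EEH a
 * chance to notice a dead PCI bus even when the interface is otherwise
 * idle; once the channel is reported offline the timer stops rearming.
 */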
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;
	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
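
/* Thin wrappers exported for the loopback self-test in the ethtool code:
 * transmit on the normal send path and reap the RX ring synchronously.
 */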
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
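
/* Final stage of EEH recovery: reopen the interface if it was running
 * before the error, restart the watchdog timer and reattach the netdev.
 */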
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
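
/* Error handlers registered with the PCI core; during AER/EEH recovery
 * they are invoked as error_detected -> slot_reset -> resume.
 */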
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;
	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}
	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
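
/* Shutdown reuses the suspend path so the link is quiesced and, when
 * enabled, wake-on-LAN is armed before power-off.
 */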
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);