2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
61 /* NETIF_MSG_TX_QUEUED | */
62 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 static int debug = -1; /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. "
81 "Default is OFF - Do Not allocate memory. ");
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 "Option to allow force of firmware core dump. "
87 "Default is OFF - Do not allow.");
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 /* required last entry */
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
103 /* This hardware semaphore causes exclusive access to
104 * resources shared between the NIC driver, MPI firmware,
105 * FCoE firmware and the FC driver.
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
137 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
147 unsigned int wait_count = 30;
149 if (!ql_sem_trylock(qdev, sem_mask))
152 } while (--wait_count);
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
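/* Usage sketch (illustrative, mirroring the flash routines further down):
 * callers bracket accesses to a shared resource with the semaphore
 * helpers, e.g.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 *
 * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times, so a
 * nonzero return means another function still holds the semaphore.
 */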
162 /* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialization
164 * process, but is also called from kernel-thread context, e.g.
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
170 int count = UDELAY_COUNT;
173 temp = ql_read32(qdev, reg);
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
181 } else if (temp & bit)
183 udelay(UDELAY_DELAY);
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
191 /* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
196 int count = UDELAY_COUNT;
200 temp = ql_read32(qdev, CFG);
205 udelay(UDELAY_DELAY);
212 /* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
238 status = ql_wait_cfg(qdev, bit);
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
253 * Wait for the bit to clear after signaling hw.
255 status = ql_wait_cfg(qdev, bit);
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
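/* Call sketch (assuming an already-initialized completion-queue ICB in
 * 'cqicb', as the ring-start path would have):
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			   CFG_LCQ, rx_ring->cq_id);
 *
 * The CFG_L* bit selects which control-block type the chip loads and
 * q_id selects the target queue; the DMA mapping, the ICB semaphore,
 * and the completion polling are all handled above.
 */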
262 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
330 /* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
340 case MAC_ADDR_TYPE_MULTI_MAC:
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372 case MAC_ADDR_TYPE_CAM_MAC:
375 u32 upper = (addr[0] << 8) | addr[1];
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
380 ql_wait_reg_rdy(qdev,
381 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 (index << MAC_ADDR_IDX_SHIFT) | /* index */
387 ql_write32(qdev, MAC_ADDR_DATA, lower);
389 ql_wait_reg_rdy(qdev,
390 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
396 ql_write32(qdev, MAC_ADDR_DATA, upper);
398 ql_wait_reg_rdy(qdev,
399 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
403 (index << MAC_ADDR_IDX_SHIFT) | /* index */
405 /* This field should also include the queue id
406 and possibly the function id. Right now we hardcode
407 the route field to NIC core.
409 cam_output = (CAM_OUT_ROUTE_NIC |
411 func << CAM_OUT_FUNC_SHIFT) |
412 (0 << CAM_OUT_CQ_ID_SHIFT));
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 cam_output |= CAM_OUT_RV;
415 /* route to NIC core */
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
419 case MAC_ADDR_TYPE_VLAN:
421 u32 enable_bit = *((u32 *) &addr[0]);
422 /* For VLAN, the addr actually holds a bit that
423 * either enables or disables the vlan id we are
424 * addressing. It's either MAC_ADDR_E on or off.
425 * That's bit-27 we're talking about.
428 ql_wait_reg_rdy(qdev,
429 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
433 (index << MAC_ADDR_IDX_SHIFT) | /* index */
435 enable_bit); /* enable/disable */
438 case MAC_ADDR_TYPE_MULTI_FLTR:
440 netif_crit(qdev, ifup, qdev->ndev,
441 "Address type %d not yet supported.\n", type);
448 /* Set or clear MAC address in hardware. We sometimes
449 * have to clear it to prevent wrong frame routing,
450 * especially in a bonding environment.
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
455 char zero_mac_addr[ETH_ALEN];
459 addr = &qdev->current_mac_addr[0];
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 "Set Mac addr %pM\n", addr);
463 eth_zero_addr(zero_mac_addr);
464 addr = &zero_mac_addr[0];
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 "Clearing MAC address\n");
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
475 netif_err(qdev, ifup, qdev->ndev,
476 "Failed to init mac address.\n");
480 void ql_link_on(struct ql_adapter *qdev)
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 netif_carrier_on(qdev->ndev);
484 ql_set_mac_addr(qdev, 1);
487 void ql_link_off(struct ql_adapter *qdev)
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 netif_carrier_off(qdev->ndev);
491 ql_set_mac_addr(qdev, 0);
494 /* Get a specific frame routing value from the CAM.
495 * Used for debug and reg dump.
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
505 ql_write32(qdev, RT_IDX,
506 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
510 *value = ql_read32(qdev, RT_DATA);
515 /* The NIC function for this chip has 16 routing indexes. Each one can be used
516 * to route different frame types to various inbound queues. We send broadcast/
517 * multicast/error frames to the default queue for slow handling,
518 * and CAM hit/RSS frames to the fast handling queues.
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
523 int status = -EINVAL; /* Return error if no mask match. */
529 value = RT_IDX_DST_CAM_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
534 case RT_IDX_VALID: /* Promiscuous Mode frames. */
536 value = RT_IDX_DST_DFLT_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
541 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
543 value = RT_IDX_DST_DFLT_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
548 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
550 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_IP_CSUM_ERR_SLOT <<
553 RT_IDX_IDX_SHIFT); /* index */
556 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
558 value = RT_IDX_DST_DFLT_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 RT_IDX_IDX_SHIFT); /* index */
564 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
566 value = RT_IDX_DST_DFLT_Q | /* dest */
567 RT_IDX_TYPE_NICQ | /* type */
568 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
571 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
573 value = RT_IDX_DST_DFLT_Q | /* dest */
574 RT_IDX_TYPE_NICQ | /* type */
575 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
578 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
580 value = RT_IDX_DST_DFLT_Q | /* dest */
581 RT_IDX_TYPE_NICQ | /* type */
582 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
585 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
587 value = RT_IDX_DST_RSS | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
592 case 0: /* Clear the E-bit on an entry. */
594 value = RT_IDX_DST_DFLT_Q | /* dest */
595 RT_IDX_TYPE_NICQ | /* type */
596 (index << RT_IDX_IDX_SHIFT);/* index */
600 netif_err(qdev, ifup, qdev->ndev,
601 "Mask type %d not yet supported.\n", mask);
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
610 value |= (enable ? RT_IDX_E : 0);
611 ql_write32(qdev, RT_IDX, value);
612 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
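/* Note on the INTR_EN writes above: as with several registers on this
 * chip, the upper 16 bits of the written value act as a mask selecting
 * which of the lower 16 data bits take effect. So
 * (INTR_EN_EI << 16) | INTR_EN_EI sets the enable bit, while
 * (INTR_EN_EI << 16) alone clears it without disturbing other bits.
 */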
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629 * Otherwise, we may have multiple outstanding workers and don't want to
630 * enable until the last one finishes. In this case, the irq_cnt gets
631 * incremented every time we queue a worker and decremented every time
632 * a worker finishes. Once it hits zero we enable the interrupt.
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
637 unsigned long hw_flags = 0;
638 struct intr_context *ctx = qdev->intr_context + intr;
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 /* Always enable if we're running MSI-X multi interrupts and
642 * it's not the default (zeroth) interrupt.
644 ql_write32(qdev, INTR_EN,
646 var = ql_read32(qdev, STS);
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 ql_write32(qdev, INTR_EN,
654 var = ql_read32(qdev, STS);
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
663 struct intr_context *ctx;
665 /* HW disables for us if we're running MSI-X multi interrupts and
666 * it's not the default (zeroth) interrupt.
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
671 ctx = qdev->intr_context + intr;
672 spin_lock(&qdev->hw_lock);
673 if (!atomic_read(&ctx->irq_cnt)) {
674 ql_write32(qdev, INTR_EN,
676 var = ql_read32(qdev, STS);
678 atomic_inc(&ctx->irq_cnt);
679 spin_unlock(&qdev->hw_lock);
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
686 for (i = 0; i < qdev->intr_count; i++) {
687 /* The enable call does an atomic_dec_and_test
688 * and enables only if the result is zero.
689 * So we precharge it here.
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 ql_enable_completion_interrupt(qdev, i);
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
703 __le16 *flash = (__le16 *)&qdev->flash;
705 status = strncmp((char *)&qdev->flash, str, 4);
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
711 for (i = 0; i < size; i++)
712 csum += le16_to_cpu(*flash++);
715 netif_err(qdev, ifup, qdev->ndev,
716 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
724 /* wait for reg to come ready */
725 status = ql_wait_reg_rdy(qdev,
726 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
729 /* set up for reg read */
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 /* wait for reg to come ready */
732 status = ql_wait_reg_rdy(qdev,
733 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
736 /* This data is stored on flash as an array of
737 * __le32. Since ql_read32() returns cpu endian
738 * we need to swap it back.
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
749 __le32 *p = (__le32 *)&qdev->flash;
753 /* Get flash offset for function and adjust
757 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
759 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
764 size = sizeof(struct flash_params_8000) / sizeof(u32);
765 for (i = 0; i < size; i++, p++) {
766 status = ql_read_flash_word(qdev, i+offset, p);
768 netif_err(qdev, ifup, qdev->ndev,
769 "Error reading flash.\n");
774 status = ql_validate_flash(qdev,
775 sizeof(struct flash_params_8000) / sizeof(u16),
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
783 /* Extract either manufacturer or BOFM modified
786 if (qdev->flash.flash_params_8000.data_type1 == 2)
788 qdev->flash.flash_params_8000.mac_addr1,
789 qdev->ndev->addr_len);
792 qdev->flash.flash_params_8000.mac_addr,
793 qdev->ndev->addr_len);
795 if (!is_valid_ether_addr(mac_addr)) {
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
801 memcpy(qdev->ndev->dev_addr,
803 qdev->ndev->addr_len);
806 ql_sem_unlock(qdev, SEM_FLASH_MASK);
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
814 __le32 *p = (__le32 *)&qdev->flash;
816 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
818 /* Second function's parameters follow the first
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
827 for (i = 0; i < size; i++, p++) {
828 status = ql_read_flash_word(qdev, i+offset, p);
830 netif_err(qdev, ifup, qdev->ndev,
831 "Error reading flash.\n");
837 status = ql_validate_flash(qdev,
838 sizeof(struct flash_params_8012) / sizeof(u16),
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
851 memcpy(qdev->ndev->dev_addr,
852 qdev->flash.flash_params_8012.mac_addr,
853 qdev->ndev->addr_len);
856 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861 * register pair. Each read/write requires us to wait for the ready
862 * bit before reading/writing the data.
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
872 /* write the data to the data reg */
873 ql_write32(qdev, XGMAC_DATA, data);
874 /* trigger the write */
875 ql_write32(qdev, XGMAC_ADDR, reg);
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880 * register pair. Each read/write requires us to wait for the ready
881 * bit before reading/writing the data.
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
886 /* wait for reg to come ready */
887 status = ql_wait_reg_rdy(qdev,
888 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
891 /* set up for reg read */
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 /* wait for reg to come ready */
894 status = ql_wait_reg_rdy(qdev,
895 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 *data = ql_read32(qdev, XGMAC_DATA);
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
911 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 *data = (u64) lo | ((u64) hi << 32);
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
929 * Get MPI firmware version for driver banner
932 status = ql_mb_about_fw(qdev);
935 status = ql_mb_get_fw_state(qdev);
938 /* Wake up a worker to get/set the TX/RX frame sizes. */
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
944 /* Take the MAC Core out of reset.
945 * Enable statistics counting.
946 * Take the transmitter/receiver out of reset.
947 * This functionality may be done in the MPI firmware at a
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 /* Another function has the semaphore, so
957 * wait for the port init bit to come ready.
959 netif_info(qdev, link, qdev->ndev,
960 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
963 netif_crit(qdev, link, qdev->ndev,
964 "Port initialize timed out.\n");
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
970 /* Set the core reset. */
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
974 data |= GLOBAL_CFG_RESET;
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 /* Clear the core reset and turn on jumbo for receiver. */
980 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
981 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
982 data |= GLOBAL_CFG_TX_STAT_EN;
983 data |= GLOBAL_CFG_RX_STAT_EN;
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 /* Enable the transmitter and clear its reset. */
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
992 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
993 data |= TX_CFG_EN; /* Enable the transmitter. */
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 /* Enable the receiver and clear its reset. */
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1002 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1003 data |= RX_CFG_EN; /* Enable the receiver. */
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 /* Turn on jumbo. */
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 /* Signal to the world that the port is enabled. */
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1027 return PAGE_SIZE << qdev->lbq_buf_order;
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 rx_ring->lbq_curr_idx++;
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 rx_ring->lbq_curr_idx = 0;
1037 rx_ring->lbq_free_cnt++;
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 struct rx_ring *rx_ring)
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1046 pci_dma_sync_single_for_cpu(qdev->pdev,
1047 dma_unmap_addr(lbq_desc, mapaddr),
1048 rx_ring->lbq_buf_size,
1049 PCI_DMA_FROMDEVICE);
1051 /* If it's the last chunk of our master page then
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 == ql_lbq_block_size(qdev))
1056 pci_unmap_page(qdev->pdev,
1057 lbq_desc->p.pg_chunk.map,
1058 ql_lbq_block_size(qdev),
1059 PCI_DMA_FROMDEVICE);
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 rx_ring->sbq_curr_idx++;
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 rx_ring->sbq_curr_idx = 0;
1070 rx_ring->sbq_free_cnt++;
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1077 rx_ring->cnsmr_idx++;
1078 rx_ring->curr_entry++;
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 rx_ring->cnsmr_idx = 0;
1081 rx_ring->curr_entry = rx_ring->cq_base;
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 struct bq_desc *lbq_desc)
1093 if (!rx_ring->pg_chunk.page) {
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1097 qdev->lbq_buf_order);
1098 if (unlikely(!rx_ring->pg_chunk.page)) {
1099 netif_err(qdev, drv, qdev->ndev,
1100 "page allocation failed.\n");
1103 rx_ring->pg_chunk.offset = 0;
1104 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105 0, ql_lbq_block_size(qdev),
1106 PCI_DMA_FROMDEVICE);
1107 if (pci_dma_mapping_error(qdev->pdev, map)) {
1108 __free_pages(rx_ring->pg_chunk.page,
1109 qdev->lbq_buf_order);
1110 rx_ring->pg_chunk.page = NULL;
1111 netif_err(qdev, drv, qdev->ndev,
1112 "PCI mapping failed.\n");
1115 rx_ring->pg_chunk.map = map;
1116 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 /* Copy the current master pg_chunk info
1120 * to the current descriptor.
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1124 /* Adjust the master page chunk for next
1127 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129 rx_ring->pg_chunk.page = NULL;
1130 lbq_desc->p.pg_chunk.last_flag = 1;
1132 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133 get_page(rx_ring->pg_chunk.page);
1134 lbq_desc->p.pg_chunk.last_flag = 0;
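/* Worked example of the chunking above (illustrative numbers): with 4K
 * pages and lbq_buf_order = 1, ql_lbq_block_size() is 8K, so a 2K
 * lbq_buf_size carves four chunks from one master page. The first three
 * chunks each take a get_page() reference; the last one (offset reaches
 * the block size) keeps the original allocation reference and drops the
 * ring's pointer so the next call allocates a fresh page.
 */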
1138 /* Process (refill) a large buffer queue. */
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1141 u32 clean_idx = rx_ring->lbq_clean_idx;
1142 u32 start_idx = clean_idx;
1143 struct bq_desc *lbq_desc;
1147 while (rx_ring->lbq_free_cnt > 32) {
1148 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150 "lbq: try cleaning clean_idx = %d.\n",
1152 lbq_desc = &rx_ring->lbq[clean_idx];
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154 rx_ring->lbq_clean_idx = clean_idx;
1155 netif_err(qdev, ifup, qdev->ndev,
1156 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1163 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164 dma_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1166 *lbq_desc->addr = cpu_to_le64(map);
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1172 if (clean_idx == rx_ring->lbq_len)
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
1180 rx_ring->lbq_free_cnt -= 16;
1183 if (start_idx != clean_idx) {
1184 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
1197 struct bq_desc *sbq_desc;
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205 "sbq: try cleaning clean_idx = %d.\n",
1207 if (sbq_desc->p.skb == NULL) {
1208 netif_printk(qdev, rx_status, KERN_DEBUG,
1210 "sbq: getting new skb for index %d.\n",
1213 netdev_alloc_skb(qdev->ndev,
1215 if (sbq_desc->p.skb == NULL) {
1216 rx_ring->sbq_clean_idx = clean_idx;
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1239 if (clean_idx == rx_ring->sbq_len)
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If it's the zeroth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there are more than 6 frags,
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1286 "unmapping OAL area.\n");
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1291 dma_unmap_len(&tx_ring_desc->map[i],
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1314 int len = skb_headlen(skb);
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1325 * Map the skb buffer first.
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1334 return NETDEV_TX_BUSY;
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1344 * This loop fills the remainder of the 8 address descriptors
1345 * in the IOCB. If there are more than 7 fragments, then the
1346 * eighth address desc will point to an external list (OAL).
1347 * When this happens, the remainder of the frags will be stored
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1384 tbd->addr = cpu_to_le64(map);
1386 * The length is the number of fragments
1387 * that remain to be mapped times the length
1388 * of our sglist (OAL).
1391 cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
1430 * so they can be unmapped.
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
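/* Worked example of the OAL scheme above: an skb with a linear area and
 * 10 frags needs 12 DMA mappings in all. seg[0] takes skb->data,
 * seg[1]-seg[6] take frags 0-5, and seg[7] is consumed by the mapped
 * pointer to the OAL; frags 6-9 then land in oal->seg[0]-oal->seg[3],
 * with the seg[7] length field encoding (frag_cnt - frag_idx)
 * descriptors plus TX_DESC_C.
 */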
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 struct rx_ring *rx_ring)
1440 struct nic_stats *stats = &qdev->nic_stats;
1442 stats->rx_err_count++;
1443 rx_ring->rx_errors++;
1445 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447 stats->rx_code_err++;
1449 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450 stats->rx_oversize_err++;
1452 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453 stats->rx_undersize_err++;
1455 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456 stats->rx_preamble_err++;
1458 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459 stats->rx_frame_len_err++;
1461 case IB_MAC_IOCB_RSP_ERR_CRC:
1462 stats->rx_crc_err++;
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1472 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473 struct ib_mac_iocb_rsp *ib_mac_rsp,
1474 void *page, size_t *len)
1478 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1480 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1482 /* Look for stacked vlan tags in ethertype field */
1483 if (tags[6] == ETH_P_8021Q &&
1484 tags[8] == ETH_P_8021Q)
1485 *len += 2 * VLAN_HLEN;
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1498 struct sk_buff *skb;
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500 struct napi_struct *napi = &rx_ring->napi;
1502 /* Frame error, so drop the packet. */
1503 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 put_page(lbq_desc->p.pg_chunk.page);
1508 napi->dev = qdev->ndev;
1510 skb = napi_get_frags(napi);
1512 netif_err(qdev, drv, qdev->ndev,
1513 "Couldn't get an skb, exiting.\n");
1514 rx_ring->rx_dropped++;
1515 put_page(lbq_desc->p.pg_chunk.page);
1518 prefetch(lbq_desc->p.pg_chunk.va);
1519 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 lbq_desc->p.pg_chunk.page,
1521 lbq_desc->p.pg_chunk.offset,
1525 skb->data_len += length;
1526 skb->truesize += length;
1527 skb_shinfo(skb)->nr_frags++;
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += length;
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 skb_record_rx_queue(skb, rx_ring->cq_id);
1533 if (vlan_id != 0xffff)
1534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535 napi_gro_frags(napi);
1538 /* Process an inbound completion from an rx ring. */
1539 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 struct rx_ring *rx_ring,
1541 struct ib_mac_iocb_rsp *ib_mac_rsp,
1545 struct net_device *ndev = qdev->ndev;
1546 struct sk_buff *skb = NULL;
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 struct napi_struct *napi = &rx_ring->napi;
1550 size_t hlen = ETH_HLEN;
1552 skb = netdev_alloc_skb(ndev, length);
1554 rx_ring->rx_dropped++;
1555 put_page(lbq_desc->p.pg_chunk.page);
1559 addr = lbq_desc->p.pg_chunk.va;
1562 /* Frame error, so drop the packet. */
1563 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1568 /* Update the MAC header length*/
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1571 /* The max framesize filter on this chip is set higher than
1572 * MTU since FCoE uses 2k frames.
1574 if (skb->len > ndev->mtu + hlen) {
1575 netif_err(qdev, drv, qdev->ndev,
1576 "Segment too small, dropping.\n");
1577 rx_ring->rx_dropped++;
1580 skb_put_data(skb, addr, hlen);
1581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585 lbq_desc->p.pg_chunk.offset + hlen,
1587 skb->len += length - hlen;
1588 skb->data_len += length - hlen;
1589 skb->truesize += length - hlen;
1591 rx_ring->rx_packets++;
1592 rx_ring->rx_bytes += skb->len;
1593 skb->protocol = eth_type_trans(skb, ndev);
1594 skb_checksum_none_assert(skb);
1596 if ((ndev->features & NETIF_F_RXCSUM) &&
1597 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1599 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 "TCP checksum done!\n");
1602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 /* Unfragmented ipv4 UDP frame. */
1607 (struct iphdr *)((u8 *)addr + hlen);
1608 if (!(iph->frag_off &
1609 htons(IP_MF|IP_OFFSET))) {
1610 skb->ip_summed = CHECKSUM_UNNECESSARY;
1611 netif_printk(qdev, rx_status, KERN_DEBUG,
1613 "UDP checksum done!\n");
1618 skb_record_rx_queue(skb, rx_ring->cq_id);
1619 if (vlan_id != 0xffff)
1620 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 napi_gro_receive(napi, skb);
1624 netif_receive_skb(skb);
1627 dev_kfree_skb_any(skb);
1628 put_page(lbq_desc->p.pg_chunk.page);
1631 /* Process an inbound completion from an rx ring. */
1632 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 struct rx_ring *rx_ring,
1634 struct ib_mac_iocb_rsp *ib_mac_rsp,
1638 struct net_device *ndev = qdev->ndev;
1639 struct sk_buff *skb = NULL;
1640 struct sk_buff *new_skb = NULL;
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1643 skb = sbq_desc->p.skb;
1644 /* Allocate new_skb and copy */
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 if (new_skb == NULL) {
1647 rx_ring->rx_dropped++;
1650 skb_reserve(new_skb, NET_IP_ALIGN);
1652 pci_dma_sync_single_for_cpu(qdev->pdev,
1653 dma_unmap_addr(sbq_desc, mapaddr),
1654 dma_unmap_len(sbq_desc, maplen),
1655 PCI_DMA_FROMDEVICE);
1657 skb_put_data(new_skb, skb->data, length);
1659 pci_dma_sync_single_for_device(qdev->pdev,
1660 dma_unmap_addr(sbq_desc, mapaddr),
1661 dma_unmap_len(sbq_desc, maplen),
1662 PCI_DMA_FROMDEVICE);
1665 /* Frame error, so drop the packet. */
1666 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1667 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1668 dev_kfree_skb_any(skb);
1672 /* loopback self test for ethtool */
1673 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1674 ql_check_lb_frame(qdev, skb);
1675 dev_kfree_skb_any(skb);
1679 /* The max framesize filter on this chip is set higher than
1680 * MTU since FCoE uses 2k frames.
1682 if (skb->len > ndev->mtu + ETH_HLEN) {
1683 dev_kfree_skb_any(skb);
1684 rx_ring->rx_dropped++;
1688 prefetch(skb->data);
1689 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1690 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1693 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1694 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1696 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1699 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1700 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1701 "Promiscuous Packet.\n");
1703 rx_ring->rx_packets++;
1704 rx_ring->rx_bytes += skb->len;
1705 skb->protocol = eth_type_trans(skb, ndev);
1706 skb_checksum_none_assert(skb);
1708 /* If rx checksum is on, and there are no
1709 * csum or frame errors.
1711 if ((ndev->features & NETIF_F_RXCSUM) &&
1712 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1714 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1716 "TCP checksum done!\n");
1717 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720 /* Unfragmented ipv4 UDP frame. */
1721 struct iphdr *iph = (struct iphdr *) skb->data;
1722 if (!(iph->frag_off &
1723 htons(IP_MF|IP_OFFSET))) {
1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1725 netif_printk(qdev, rx_status, KERN_DEBUG,
1727 "UDP checksum done!\n");
1732 skb_record_rx_queue(skb, rx_ring->cq_id);
1733 if (vlan_id != 0xffff)
1734 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1735 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1736 napi_gro_receive(&rx_ring->napi, skb);
1738 netif_receive_skb(skb);
1741 static void ql_realign_skb(struct sk_buff *skb, int len)
1743 void *temp_addr = skb->data;
1745 /* Undo the skb_reserve(skb,32) we did before
1746 * giving to hardware, and realign data on
1747 * a 2-byte boundary.
1749 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1750 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1751 skb_copy_to_linear_data(skb, temp_addr,
1756 * This function builds an skb for the given inbound
1757 * completion. It will be rewritten for readability in the near
1758 * future, but for now it works well.
1760 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1761 struct rx_ring *rx_ring,
1762 struct ib_mac_iocb_rsp *ib_mac_rsp)
1764 struct bq_desc *lbq_desc;
1765 struct bq_desc *sbq_desc;
1766 struct sk_buff *skb = NULL;
1767 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1768 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1769 size_t hlen = ETH_HLEN;
1772 * Handle the header buffer if present.
1774 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1775 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1776 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1777 "Header of %d bytes in small buffer.\n", hdr_len);
1779 * Headers fit nicely into a small buffer.
1781 sbq_desc = ql_get_curr_sbuf(rx_ring);
1782 pci_unmap_single(qdev->pdev,
1783 dma_unmap_addr(sbq_desc, mapaddr),
1784 dma_unmap_len(sbq_desc, maplen),
1785 PCI_DMA_FROMDEVICE);
1786 skb = sbq_desc->p.skb;
1787 ql_realign_skb(skb, hdr_len);
1788 skb_put(skb, hdr_len);
1789 sbq_desc->p.skb = NULL;
1793 * Handle the data buffer(s).
1795 if (unlikely(!length)) { /* Is there data too? */
1796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "No Data buffer in this packet.\n");
1801 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1802 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1803 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804 "Headers in small, data of %d bytes in small, combine them.\n",
1807 * Data is less than small buffer size so it's
1808 * stuffed in a small buffer.
1809 * For this case we append the data
1810 * from the "data" small buffer to the "header" small
1813 sbq_desc = ql_get_curr_sbuf(rx_ring);
1814 pci_dma_sync_single_for_cpu(qdev->pdev,
1816 (sbq_desc, mapaddr),
1819 PCI_DMA_FROMDEVICE);
1820 skb_put_data(skb, sbq_desc->p.skb->data, length);
1821 pci_dma_sync_single_for_device(qdev->pdev,
1828 PCI_DMA_FROMDEVICE);
1830 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831 "%d bytes in a single small buffer.\n",
1833 sbq_desc = ql_get_curr_sbuf(rx_ring);
1834 skb = sbq_desc->p.skb;
1835 ql_realign_skb(skb, length);
1836 skb_put(skb, length);
1837 pci_unmap_single(qdev->pdev,
1838 dma_unmap_addr(sbq_desc,
1840 dma_unmap_len(sbq_desc,
1842 PCI_DMA_FROMDEVICE);
1843 sbq_desc->p.skb = NULL;
1845 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1846 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848 "Header in small, %d bytes in large. Chain large to small!\n",
1851 * The data is in a single large buffer. We
1852 * chain it to the header buffer's skb and let
1855 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1856 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1857 "Chaining page at offset = %d, for %d bytes to skb.\n",
1858 lbq_desc->p.pg_chunk.offset, length);
1859 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1860 lbq_desc->p.pg_chunk.offset,
1863 skb->data_len += length;
1864 skb->truesize += length;
1867 * The headers and data are in a single large buffer. We
1868 * copy it to a new skb and let it go. This can happen with
1869 * jumbo mtu on a non-TCP/UDP frame.
1871 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1872 skb = netdev_alloc_skb(qdev->ndev, length);
1874 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1875 "No skb available, drop the packet.\n");
1878 pci_unmap_page(qdev->pdev,
1879 dma_unmap_addr(lbq_desc,
1881 dma_unmap_len(lbq_desc, maplen),
1882 PCI_DMA_FROMDEVICE);
1883 skb_reserve(skb, NET_IP_ALIGN);
1884 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1885 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1887 skb_fill_page_desc(skb, 0,
1888 lbq_desc->p.pg_chunk.page,
1889 lbq_desc->p.pg_chunk.offset,
1892 skb->data_len += length;
1893 skb->truesize += length;
1894 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1895 lbq_desc->p.pg_chunk.va,
1897 __pskb_pull_tail(skb, hlen);
1901 * The data is in a chain of large buffers
1902 * pointed to by a small buffer. We loop
1903 * through and chain them to our small header
1905 * frags: There are 18 max frags and our small
1906 * buffer will hold 32 of them. The thing is,
1907 * we'll use 3 max for our 9000 byte jumbo
1908 * frames. If the MTU goes up we could
1909 * eventually be in trouble.
1912 sbq_desc = ql_get_curr_sbuf(rx_ring);
1913 pci_unmap_single(qdev->pdev,
1914 dma_unmap_addr(sbq_desc, mapaddr),
1915 dma_unmap_len(sbq_desc, maplen),
1916 PCI_DMA_FROMDEVICE);
1917 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1919 * This is a non-TCP/UDP IP frame, so
1920 * the headers aren't split into a small
1921 * buffer. We have to use the small buffer
1922 * that contains our sg list as our skb to
1923 * send upstairs. Copy the sg list here to
1924 * a local buffer and use it to find the
1927 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1928 "%d bytes of headers & data in chain of large.\n",
1930 skb = sbq_desc->p.skb;
1931 sbq_desc->p.skb = NULL;
1932 skb_reserve(skb, NET_IP_ALIGN);
1935 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1936 size = (length < rx_ring->lbq_buf_size) ? length :
1937 rx_ring->lbq_buf_size;
1939 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1940 "Adding page %d to skb for %d bytes.\n",
1942 skb_fill_page_desc(skb, i,
1943 lbq_desc->p.pg_chunk.page,
1944 lbq_desc->p.pg_chunk.offset,
1947 skb->data_len += size;
1948 skb->truesize += size;
1951 } while (length > 0);
1952 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1954 __pskb_pull_tail(skb, hlen);
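/* Summary of the cases handled above: header-split frames start from the
 * small-buffer skb, with data arriving either in a second small buffer
 * (copied in), a single large-buffer chunk (chained as a page frag), or
 * a chain of large buffers walked via the sg list; unsplit frames that
 * fit one large buffer get a fresh skb with the page chained on instead.
 */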
1959 /* Process an inbound completion from an rx ring. */
1960 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1961 struct rx_ring *rx_ring,
1962 struct ib_mac_iocb_rsp *ib_mac_rsp,
1965 struct net_device *ndev = qdev->ndev;
1966 struct sk_buff *skb = NULL;
1968 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1970 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1971 if (unlikely(!skb)) {
1972 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1973 "No skb available, drop packet.\n");
1974 rx_ring->rx_dropped++;
1978 /* Frame error, so drop the packet. */
1979 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1980 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1981 dev_kfree_skb_any(skb);
1985 /* The max framesize filter on this chip is set higher than
1986 * MTU since FCoE uses 2k frames.
1988 if (skb->len > ndev->mtu + ETH_HLEN) {
1989 dev_kfree_skb_any(skb);
1990 rx_ring->rx_dropped++;
1994 /* loopback self test for ethtool */
1995 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1996 ql_check_lb_frame(qdev, skb);
1997 dev_kfree_skb_any(skb);
2001 prefetch(skb->data);
2002 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2003 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2004 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2006 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2008 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2009 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2010 rx_ring->rx_multicast++;
2012 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2013 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2014 "Promiscuous Packet.\n");
2017 skb->protocol = eth_type_trans(skb, ndev);
2018 skb_checksum_none_assert(skb);
2020 /* If rx checksum is on, and there are no
2021 * csum or frame errors.
2023 if ((ndev->features & NETIF_F_RXCSUM) &&
2024 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2026 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2027 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2028 "TCP checksum done!\n");
2029 skb->ip_summed = CHECKSUM_UNNECESSARY;
2030 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2031 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2032 /* Unfragmented ipv4 UDP frame. */
2033 struct iphdr *iph = (struct iphdr *) skb->data;
2034 if (!(iph->frag_off &
2035 htons(IP_MF|IP_OFFSET))) {
2036 skb->ip_summed = CHECKSUM_UNNECESSARY;
2037 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2038 "TCP checksum done!\n");
2043 rx_ring->rx_packets++;
2044 rx_ring->rx_bytes += skb->len;
2045 skb_record_rx_queue(skb, rx_ring->cq_id);
2046 if (vlan_id != 0xffff)
2047 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2048 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2049 napi_gro_receive(&rx_ring->napi, skb);
2051 netif_receive_skb(skb);
2054 /* Process an inbound completion from an rx ring. */
2055 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2056 struct rx_ring *rx_ring,
2057 struct ib_mac_iocb_rsp *ib_mac_rsp)
2059 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2060 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2061 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2062 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2063 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2065 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2067 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2068 /* The data and headers are split into
2071 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2074 /* The data fit in a single small buffer.
2075 * Allocate a new skb, copy the data and
2076 * return the buffer to the free pool.
2078 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2080 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2081 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2082 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2083 /* TCP packet in a page chunk that's been checksummed.
2084 * Tack it on to our GRO skb and let it go.
2086 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2088 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2089 /* Non-TCP packet in a page chunk. Allocate an
2090 * skb, tack it on frags, and send it up.
2092 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2095 /* Non-TCP/UDP large frames that span multiple buffers
2096 * can be processed correctly by the split frame logic.
2098 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2102 return (unsigned long)length;
2105 /* Process an outbound completion from an rx ring. */
2106 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2107 struct ob_mac_iocb_rsp *mac_rsp)
2109 struct tx_ring *tx_ring;
2110 struct tx_ring_desc *tx_ring_desc;
2112 QL_DUMP_OB_MAC_RSP(mac_rsp);
2113 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2114 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2115 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2116 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2117 tx_ring->tx_packets++;
2118 dev_kfree_skb(tx_ring_desc->skb);
2119 tx_ring_desc->skb = NULL;
2121 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2124 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2125 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2126 netif_warn(qdev, tx_done, qdev->ndev,
2127 "Total descriptor length did not match transfer length.\n");
2129 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2130 netif_warn(qdev, tx_done, qdev->ndev,
2131 "Frame too short to be valid, not sent.\n");
2133 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2134 netif_warn(qdev, tx_done, qdev->ndev,
2135 "Frame too long, but sent anyway.\n");
2137 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2138 netif_warn(qdev, tx_done, qdev->ndev,
2139 "PCI backplane error. Frame not sent.\n");
2142 atomic_inc(&tx_ring->tx_count);
2145 /* Fire up a handler to reset the MPI processor. */
2146 void ql_queue_fw_error(struct ql_adapter *qdev)
2149 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2152 void ql_queue_asic_error(struct ql_adapter *qdev)
2155 ql_disable_interrupts(qdev);
2156 /* Clear adapter up bit to signal the recovery
2157 * process that it shouldn't kill the reset worker
2160 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2161 /* Set asic recovery bit to indicate to the reset process that we
2162 * are in fatal error recovery rather than a normal close
2164 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2165 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2168 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2169 struct ib_ae_iocb_rsp *ib_ae_rsp)
2171 switch (ib_ae_rsp->event) {
2172 case MGMT_ERR_EVENT:
2173 netif_err(qdev, rx_err, qdev->ndev,
2174 "Management Processor Fatal Error.\n");
2175 ql_queue_fw_error(qdev);
2178 case CAM_LOOKUP_ERR_EVENT:
2179 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2180 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2181 ql_queue_asic_error(qdev);
2184 case SOFT_ECC_ERROR_EVENT:
2185 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2186 ql_queue_asic_error(qdev);
2189 case PCI_ERR_ANON_BUF_RD:
2190 netdev_err(qdev->ndev, "PCI error occurred when reading "
2191 "anonymous buffers from rx_ring %d.\n",
2193 ql_queue_asic_error(qdev);
2197 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2199 ql_queue_asic_error(qdev);
2204 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2206 struct ql_adapter *qdev = rx_ring->qdev;
2207 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2208 struct ob_mac_iocb_rsp *net_rsp = NULL;
2211 struct tx_ring *tx_ring;
2212 /* While there are entries in the completion queue. */
2213 while (prod != rx_ring->cnsmr_idx) {
2215 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2216 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2217 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2219 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2221 switch (net_rsp->opcode) {
2223 case OPCODE_OB_MAC_TSO_IOCB:
2224 case OPCODE_OB_MAC_IOCB:
2225 ql_process_mac_tx_intr(qdev, net_rsp);
2228 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2229 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2233 ql_update_cq(rx_ring);
2234 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)) {
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
		}
	}

	return count;
}
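/*
 * Illustration (not driver code): the wake threshold above provides
 * hysteresis. With, e.g., a 256-entry work queue the stopped subqueue is
 * restarted only once more than 64 slots have drained, so it does not
 * bounce between stopped and awake on every single completion.
 */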
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;
2259 /* While there are entries in the completion queue. */
2260 while (prod != rx_ring->cnsmr_idx) {
2262 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2263 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2264 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2266 net_rsp = rx_ring->curr_entry;
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2299 struct ql_adapter *qdev = rx_ring->qdev;
2300 struct rx_ring *trx_ring;
2301 int i, work_done = 0;
2302 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2304 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2305 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2307 /* Service the TX rings first. They start
2308 * right after the RSS rings. */
2309 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2310 trx_ring = &qdev->rx_ring[i];
2311 /* If this TX completion ring belongs to this vector and
	 * it's not empty then service it.
	 */
2314 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2315 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2316 trx_ring->cnsmr_idx)) {
2317 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2318 "%s: Servicing TX completion ring %d.\n",
2319 __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
2327 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2328 rx_ring->cnsmr_idx) {
2329 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2330 "%s: Servicing RX completion ring %d.\n",
2331 __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
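/*
 * Reference note: this follows the usual NAPI contract -- the poll handler
 * reports fewer than @budget completions only when the ring is drained, and
 * re-arms the interrupt in exactly that case; otherwise the core keeps
 * polling without another interrupt.
 */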
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}
/**
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 *	based on the features to enable/disable hardware vlan accel
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;
	bool need_restart = netif_running(ndev);

	if (need_restart) {
		status = ql_adapter_down(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring down the adapter\n");
			return status;
		}
	}

	/* update the features with the recent change */
	ndev->features = features;

	if (need_restart) {
		status = ql_adapter_up(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring up the adapter\n");
			return status;
		}
	}

	return status;
}
static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	int err;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Update the behavior of vlan accel in the adapter */
		err = qlge_update_hw_vlan_features(ndev, features);
		if (err)
			return err;

		qlge_vlan_mode(ndev, features);
	}

	return 0;
}
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}
static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}
static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}
static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
2484 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;

	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In an MSI/Legacy environment it also processes the rest of
 * the rx rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
2499 struct rx_ring *rx_ring = dev_id;
2500 struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;
2505 spin_lock(&qdev->hw_lock);
2506 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2507 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2508 "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);
2514 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}
	/*
	 * Check MPI processor activity.
	 */
2531 if ((var & STS_PI) &&
2532 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
2537 netif_err(qdev, intr, qdev->ndev,
2538 "Got MPI processor interrupt.\n");
2539 ql_disable_completion_interrupt(qdev, intr_context->intr);
2540 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2541 queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}
	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass. Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
2551 var = ql_read32(qdev, ISR1);
2552 if (var & intr_context->irq_mask) {
2553 netif_info(qdev, intr, qdev->ndev,
2554 "Waking handler for rx_ring[0].\n");
2555 ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
2559 ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		__be16 l3_proto = vlan_get_protocol(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;
2574 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2575 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2576 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2577 mac_iocb_ptr->total_hdrs_len =
2578 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2579 mac_iocb_ptr->net_trans_offset =
2580 cpu_to_le16(skb_network_offset(skb) |
2581 skb_transport_offset(skb)
2582 << OB_MAC_TRANSPORT_HDR_SHIFT);
2583 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2584 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2585 if (likely(l3_proto == htons(ETH_P_IP))) {
2586 struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (l3_proto == htons(ETH_P_IPV6)) {
2594 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2595 tcp_hdr(skb)->check =
2596 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	u16 *check;
2611 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2612 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2613 mac_iocb_ptr->net_trans_offset =
2614 cpu_to_le16(skb_network_offset(skb) |
2615 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2617 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2618 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2619 if (likely(iph->protocol == IPPROTO_TCP)) {
2620 check = &(tcp_hdr(skb)->check);
2621 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2622 mac_iocb_ptr->total_hdrs_len =
2623 cpu_to_le16(skb_transport_offset(skb) +
2624 (tcp_hdr(skb)->doff << 2));
2626 check = &(udp_hdr(skb)->check);
2627 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2628 mac_iocb_ptr->total_hdrs_len =
2629 cpu_to_le16(skb_transport_offset(skb) +
2630 sizeof(struct udphdr));
2632 *check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
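/*
 * Aside (illustrative, not driver code): the store above seeds the TCP/UDP
 * checksum field with the one's-complement pseudo-header sum so the chip
 * only has to fold in the payload. A pure-software equivalent would be:
 *
 *	tcp_hdr(skb)->check =
 *		csum_tcpudp_magic(iph->saddr, iph->daddr, len, iph->protocol,
 *				  csum_partial(tcp_hdr(skb), len, 0));
 */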
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;
2645 tx_ring = &qdev->tx_ring[tx_ring_idx];
2647 if (skb_padto(skb, ETH_ZLEN))
2648 return NETDEV_TX_OK;
2650 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2651 netif_info(qdev, tx_queued, qdev->ndev,
2652 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2653 __func__, tx_ring_idx);
2654 netif_stop_subqueue(ndev, tx_ring->wq_id);
2655 tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
2658 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2659 mac_iocb_ptr = tx_ring_desc->queue_entry;
2660 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2662 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2663 mac_iocb_ptr->tid = tx_ring_desc->index;
2664 /* We use the upper 32-bits to store the tx queue for this IO.
2665 * When we get the completion we can use it to establish the context.
2667 mac_iocb_ptr->txq_idx = tx_ring_idx;
2668 tx_ring_desc->skb = skb;
2670 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2672 if (skb_vlan_tag_present(skb)) {
2673 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2674 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2675 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2683 ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
2688 netif_err(qdev, tx_queued, qdev->ndev,
2689 "Could not map the segments.\n");
2690 tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
2693 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2694 tx_ring->prod_idx++;
2695 if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
	wmb();
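	/* Illustration: prod_idx advances modulo wq_len, e.g. with a
	 * 256-entry ring it runs ..., 254, 255, 0, 1, ...; the write
	 * barrier above orders the IOCB stores before the doorbell below. */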
2699 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2700 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2701 "tx queued, slot %d, len %d\n",
2702 tx_ring->prod_idx, skb->len);
2704 atomic_dec(&tx_ring->tx_count);
	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)) {
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
		}
	}
	return NETDEV_TX_OK;
}
2719 static void ql_free_shadow_space(struct ql_adapter *qdev)
2721 if (qdev->rx_ring_shadow_reg_area) {
2722 pci_free_consistent(qdev->pdev,
2724 qdev->rx_ring_shadow_reg_area,
2725 qdev->rx_ring_shadow_reg_dma);
2726 qdev->rx_ring_shadow_reg_area = NULL;
2728 if (qdev->tx_ring_shadow_reg_area) {
2729 pci_free_consistent(qdev->pdev,
2731 qdev->tx_ring_shadow_reg_area,
2732 qdev->tx_ring_shadow_reg_dma);
2733 qdev->tx_ring_shadow_reg_area = NULL;
2737 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2739 qdev->rx_ring_shadow_reg_area =
2740 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2741 &qdev->rx_ring_shadow_reg_dma);
2742 if (qdev->rx_ring_shadow_reg_area == NULL) {
2743 netif_err(qdev, ifup, qdev->ndev,
2744 "Allocation of RX shadow space failed.\n");
2748 qdev->tx_ring_shadow_reg_area =
2749 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2750 &qdev->tx_ring_shadow_reg_dma);
2751 if (qdev->tx_ring_shadow_reg_area == NULL) {
2752 netif_err(qdev, ifup, qdev->ndev,
2753 "Allocation of TX shadow space failed.\n");
2754 goto err_wqp_sh_area;
2759 pci_free_consistent(qdev->pdev,
2761 qdev->rx_ring_shadow_reg_area,
2762 qdev->rx_ring_shadow_reg_dma);
2766 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2768 struct tx_ring_desc *tx_ring_desc;
2770 struct ob_mac_iocb_req *mac_iocb_ptr;
2772 mac_iocb_ptr = tx_ring->wq_base;
2773 tx_ring_desc = tx_ring->q;
2774 for (i = 0; i < tx_ring->wq_len; i++) {
2775 tx_ring_desc->index = i;
2776 tx_ring_desc->skb = NULL;
2777 tx_ring_desc->queue_entry = mac_iocb_ptr;
2781 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2784 static void ql_free_tx_resources(struct ql_adapter *qdev,
2785 struct tx_ring *tx_ring)
2787 if (tx_ring->wq_base) {
2788 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2789 tx_ring->wq_base, tx_ring->wq_base_dma);
2790 tx_ring->wq_base = NULL;
2796 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2797 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}
2822 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2824 struct bq_desc *lbq_desc;
2826 uint32_t curr_idx, clean_idx;
2828 curr_idx = rx_ring->lbq_curr_idx;
2829 clean_idx = rx_ring->lbq_clean_idx;
2830 while (curr_idx != clean_idx) {
2831 lbq_desc = &rx_ring->lbq[curr_idx];
2833 if (lbq_desc->p.pg_chunk.last_flag) {
2834 pci_unmap_page(qdev->pdev,
2835 lbq_desc->p.pg_chunk.map,
2836 ql_lbq_block_size(qdev),
2837 PCI_DMA_FROMDEVICE);
2838 lbq_desc->p.pg_chunk.last_flag = 0;
2841 put_page(lbq_desc->p.pg_chunk.page);
2842 lbq_desc->p.pg_chunk.page = NULL;
2844 if (++curr_idx == rx_ring->lbq_len)
2848 if (rx_ring->pg_chunk.page) {
2849 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2850 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2851 put_page(rx_ring->pg_chunk.page);
2852 rx_ring->pg_chunk.page = NULL;
2856 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2859 struct bq_desc *sbq_desc;
2861 for (i = 0; i < rx_ring->sbq_len; i++) {
2862 sbq_desc = &rx_ring->sbq[i];
2863 if (sbq_desc == NULL) {
2864 netif_err(qdev, ifup, qdev->ndev,
2865 "sbq_desc %d is NULL.\n", i);
2868 if (sbq_desc->p.skb) {
2869 pci_unmap_single(qdev->pdev,
2870 dma_unmap_addr(sbq_desc, mapaddr),
2871 dma_unmap_len(sbq_desc, maplen),
2872 PCI_DMA_FROMDEVICE);
2873 dev_kfree_skb(sbq_desc->p.skb);
2874 sbq_desc->p.skb = NULL;
2879 /* Free all large and small rx buffers associated
2880 * with the completion queues for this device.
2882 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2885 struct rx_ring *rx_ring;
2887 for (i = 0; i < qdev->rx_ring_count; i++) {
2888 rx_ring = &qdev->rx_ring[i];
2890 ql_free_lbq_buffers(qdev, rx_ring);
2892 ql_free_sbq_buffers(qdev, rx_ring);
2896 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2898 struct rx_ring *rx_ring;
2901 for (i = 0; i < qdev->rx_ring_count; i++) {
2902 rx_ring = &qdev->rx_ring[i];
2903 if (rx_ring->type != TX_Q)
2904 ql_update_buffer_queues(qdev, rx_ring);
2908 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2909 struct rx_ring *rx_ring)
2912 struct bq_desc *lbq_desc;
2913 __le64 *bq = rx_ring->lbq_base;
2915 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2916 for (i = 0; i < rx_ring->lbq_len; i++) {
2917 lbq_desc = &rx_ring->lbq[i];
2918 memset(lbq_desc, 0, sizeof(*lbq_desc));
2919 lbq_desc->index = i;
2920 lbq_desc->addr = bq;
2925 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2926 struct rx_ring *rx_ring)
2929 struct bq_desc *sbq_desc;
2930 __le64 *bq = rx_ring->sbq_base;
2932 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2933 for (i = 0; i < rx_ring->sbq_len; i++) {
2934 sbq_desc = &rx_ring->sbq[i];
2935 memset(sbq_desc, 0, sizeof(*sbq_desc));
2936 sbq_desc->index = i;
2937 sbq_desc->addr = bq;
2942 static void ql_free_rx_resources(struct ql_adapter *qdev,
2943 struct rx_ring *rx_ring)
2945 /* Free the small buffer queue. */
2946 if (rx_ring->sbq_base) {
2947 pci_free_consistent(qdev->pdev,
2949 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2950 rx_ring->sbq_base = NULL;
2953 /* Free the small buffer queue control blocks. */
2954 kfree(rx_ring->sbq);
2955 rx_ring->sbq = NULL;
2957 /* Free the large buffer queue. */
2958 if (rx_ring->lbq_base) {
2959 pci_free_consistent(qdev->pdev,
2961 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2962 rx_ring->lbq_base = NULL;
2965 /* Free the large buffer queue control blocks. */
2966 kfree(rx_ring->lbq);
2967 rx_ring->lbq = NULL;
2969 /* Free the rx queue. */
2970 if (rx_ring->cq_base) {
2971 pci_free_consistent(qdev->pdev,
2973 rx_ring->cq_base, rx_ring->cq_base_dma);
2974 rx_ring->cq_base = NULL;
2978 /* Allocate queues and buffers for this completions queue based
2979 * on the values in the parameter structure. */
2980 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2981 struct rx_ring *rx_ring)
2985 * Allocate the completion queue for this rx_ring.
2988 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2989 &rx_ring->cq_base_dma);
2991 if (rx_ring->cq_base == NULL) {
2992 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2996 if (rx_ring->sbq_len) {
2998 * Allocate small buffer queue.
3001 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3002 &rx_ring->sbq_base_dma);
3004 if (rx_ring->sbq_base == NULL) {
3005 netif_err(qdev, ifup, qdev->ndev,
3006 "Small buffer queue allocation failed.\n");
3011 * Allocate small buffer queue control blocks.
3013 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3014 sizeof(struct bq_desc),
3016 if (rx_ring->sbq == NULL)
3019 ql_init_sbq_ring(qdev, rx_ring);
3022 if (rx_ring->lbq_len) {
3024 * Allocate large buffer queue.
3027 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3028 &rx_ring->lbq_base_dma);
3030 if (rx_ring->lbq_base == NULL) {
3031 netif_err(qdev, ifup, qdev->ndev,
3032 "Large buffer queue allocation failed.\n");
3036 * Allocate large buffer queue control blocks.
3038 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3039 sizeof(struct bq_desc),
3041 if (rx_ring->lbq == NULL)
3044 ql_init_lbq_ring(qdev, rx_ring);
3050 ql_free_rx_resources(qdev, rx_ring);
3054 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3056 struct tx_ring *tx_ring;
3057 struct tx_ring_desc *tx_ring_desc;
3061 * Loop through all queues and free
3064 for (j = 0; j < qdev->tx_ring_count; j++) {
3065 tx_ring = &qdev->tx_ring[j];
3066 for (i = 0; i < tx_ring->wq_len; i++) {
3067 tx_ring_desc = &tx_ring->q[i];
3068 if (tx_ring_desc && tx_ring_desc->skb) {
3069 netif_err(qdev, ifdown, qdev->ndev,
3070 "Freeing lost SKB %p, from queue %d, index %d.\n",
3071 tx_ring_desc->skb, j,
3072 tx_ring_desc->index);
3073 ql_unmap_send(qdev, tx_ring_desc,
3074 tx_ring_desc->map_cnt);
3075 dev_kfree_skb(tx_ring_desc->skb);
3076 tx_ring_desc->skb = NULL;
3082 static void ql_free_mem_resources(struct ql_adapter *qdev)
3086 for (i = 0; i < qdev->tx_ring_count; i++)
3087 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3088 for (i = 0; i < qdev->rx_ring_count; i++)
3089 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3090 ql_free_shadow_space(qdev);
3093 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3097 /* Allocate space for our shadow registers and such. */
3098 if (ql_alloc_shadow_space(qdev))
3101 for (i = 0; i < qdev->rx_ring_count; i++) {
3102 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3103 netif_err(qdev, ifup, qdev->ndev,
3104 "RX resource allocation failed.\n");
3108 /* Allocate tx queue resources */
3109 for (i = 0; i < qdev->tx_ring_count; i++) {
3110 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3111 netif_err(qdev, ifup, qdev->ndev,
3112 "TX resource allocation failed.\n");
3119 ql_free_mem_resources(qdev);
3123 /* Set up the rx ring control block and pass it to the chip.
3124 * The control block is defined as
3125 * "Completion Queue Initialization Control Block", or cqicb.
3127 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3129 struct cqicb *cqicb = &rx_ring->cqicb;
3130 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3131 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3132 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3133 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3134 void __iomem *doorbell_area =
3135 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries = 0;
3142 /* Set up the shadow registers for this ring. */
3143 rx_ring->prod_idx_sh_reg = shadow_reg;
3144 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3145 *rx_ring->prod_idx_sh_reg = 0;
3146 shadow_reg += sizeof(u64);
3147 shadow_reg_dma += sizeof(u64);
3148 rx_ring->lbq_base_indirect = shadow_reg;
3149 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3150 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3151 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3152 rx_ring->sbq_base_indirect = shadow_reg;
3153 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
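	/* Resulting per-CQ shadow layout (for reference):
	 *   +0x00  producer index, written back by the hardware
	 *   +0x08  lbq base-address indirection list
	 *   +0x08 + 8 * MAX_DB_PAGES_PER_BQ(lbq_len)
	 *          sbq base-address indirection list
	 */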
3155 /* PCI doorbell mem area + 0x00 for consumer index register */
3156 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3157 rx_ring->cnsmr_idx = 0;
3158 rx_ring->curr_entry = rx_ring->cq_base;
3160 /* PCI doorbell mem area + 0x04 for valid register */
3161 rx_ring->valid_db_reg = doorbell_area + 0x04;
3163 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3164 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3166 /* PCI doorbell mem area + 0x1c */
3167 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3169 memset((void *)cqicb, 0, sizeof(struct cqicb));
3170 cqicb->msix_vect = rx_ring->irq;
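	/* Note: the CQICB length fields are 16 bits wide, so a maximal
	 * 65536-entry queue is encoded as 0 (the value wraps in a u16). */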
3172 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3173 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3175 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3177 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3180 * Set up the control block load flags.
3182 cqicb->flags = FLAGS_LC | /* Load queue base address */
3183 FLAGS_LV | /* Load MSI-X vector */
3184 FLAGS_LI; /* Load irq delay values */
3185 if (rx_ring->lbq_len) {
3186 cqicb->flags |= FLAGS_LL; /* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3198 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3199 (u16) rx_ring->lbq_buf_size;
3200 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3201 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3202 (u16) rx_ring->lbq_len;
3203 cqicb->lbq_len = cpu_to_le16(bq_len);
3204 rx_ring->lbq_prod_idx = 0;
3205 rx_ring->lbq_curr_idx = 0;
3206 rx_ring->lbq_clean_idx = 0;
3207 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3209 if (rx_ring->sbq_len) {
3210 cqicb->flags |= FLAGS_LS; /* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3222 cqicb->sbq_buf_size =
3223 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3224 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3225 (u16) rx_ring->sbq_len;
3226 cqicb->sbq_len = cpu_to_le16(bq_len);
3227 rx_ring->sbq_prod_idx = 0;
3228 rx_ring->sbq_curr_idx = 0;
3229 rx_ring->sbq_clean_idx = 0;
3230 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
3250 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3251 CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
3259 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3261 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3262 void __iomem *doorbell_area =
3263 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3264 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3265 (tx_ring->wq_id * sizeof(u64));
3266 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;
3271 * Assign doorbell registers for this tx_ring.
3273 /* TX PCI doorbell mem area for tx producer index */
3274 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3275 tx_ring->prod_idx = 0;
3276 /* TX PCI doorbell mem area + 0x04 */
3277 tx_ring->valid_db_reg = doorbell_area + 0x04;
3280 * Assign shadow registers for this tx_ring.
3282 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3283 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3285 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3286 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3287 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3288 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3290 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3292 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3294 ql_init_tx_ring(qdev, tx_ring);
3296 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3297 (u16) tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}
3305 static void ql_disable_msix(struct ql_adapter *qdev)
3307 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3308 pci_disable_msix(qdev->pdev);
3309 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3310 kfree(qdev->msi_x_entry);
3311 qdev->msi_x_entry = NULL;
3312 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3313 pci_disable_msi(qdev->pdev);
3314 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3318 /* We start by trying to get the number of vectors
3319 * stored in qdev->intr_count. If we don't get that
3320 * many then we reduce the count and try again.
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;
3326 /* Get the MSIX vectors. */
3327 if (qlge_irq_type == MSIX_IRQ) {
3328 /* Try to alloc space for the msix struct,
3329 * if it fails then go to MSI/legacy.
3331 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3332 sizeof(struct msix_entry),
3334 if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}
3339 for (i = 0; i < qdev->intr_count; i++)
3340 qdev->msi_x_entry[i].entry = i;
		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qlge_irq_type = MSI_IRQ;
		} else {
			qdev->intr_count = err;
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
3361 if (qlge_irq_type == MSI_IRQ) {
3362 if (!pci_enable_msi(qdev->pdev)) {
3363 set_bit(QL_MSI_ENABLED, &qdev->flags);
3364 netif_info(qdev, ifup, qdev->ndev,
3365 "Running with MSI interrupts.\n");
3369 qlge_irq_type = LEG_IRQ;
3370 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3371 "Running with legacy interrupts.\n");
/* Each vector services 1 RSS ring and 1 or more
3375 * TX completion rings. This function loops through
3376 * the TX completion rings and assigns the vector that
3377 * will service it. An example would be if there are
3378 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3379 * This would mean that vector 0 would service RSS ring 0
3380 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
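/*
 * Illustration only (hypothetical helper, not part of the driver): the
 * mapping described above reduces to integer division.
 */
static inline u32 qlge_example_tx_vect(u32 tx_idx, u32 tx_ring_count,
				       u32 intr_count)
{
	u32 tx_rings_per_vector = tx_ring_count / intr_count;

	/* e.g. 8 TX rings, 2 vectors: TX ring 5 -> vector 5 / 4 = 1 */
	return tx_idx / tx_rings_per_vector;
}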
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3388 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3389 /* Assign irq vectors to TX rx_rings.*/
3390 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3391 i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
3408 /* Set the interrupt mask for this vector. Each vector
3409 * will service 1 RSS ring and 1 or more TX completion
3410 * rings. This function sets up a bit mask per vector
3411 * that indicates which rings it services.
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3416 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3418 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
3422 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
3440 * Here we build the intr_context structures based on
3441 * our rx_ring count and intr vector count.
3442 * The intr_context structure is used to hook each vector
3443 * to possibly different handlers.
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];
3450 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
3455 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3456 qdev->rx_ring[i].irq = i;
3457 intr_context->intr = i;
3458 intr_context->qdev = qdev;
3459 /* Set up this vector's bit-mask that indicates
3460 * which queues it services.
3462 ql_set_irq_mask(qdev, intr_context);
3464 * We set up each vectors enable/disable/read bits so
3465 * there's no bit/mask calculations in the critical path.
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
3502 intr_context->intr = 0;
3503 intr_context->qdev = qdev;
3505 * We set up each vectors enable/disable/read bits so
3506 * there's no bit/mask calculations in the critical path.
3508 intr_context->intr_en_mask =
3509 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3510 intr_context->intr_dis_mask =
3511 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3512 INTR_EN_TYPE_DISABLE;
3513 intr_context->intr_read_mask =
3514 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3516 * Single interrupt means one handler for all rings.
3518 intr_context->handler = qlge_isr;
3519 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3520 /* Set up this vector's bit-mask that indicates
3521 * which queues it services. In this case there is
3522 * a single vector so it will service all RSS and
3523 * TX completion rings.
3525 ql_set_irq_mask(qdev, intr_context);
3527 /* Tell the TX completion rings which MSIx vector
3528 * they will be using.
3530 ql_set_tx_vect(qdev);
3533 static void ql_free_irq(struct ql_adapter *qdev)
3536 struct intr_context *intr_context = &qdev->intr_context[0];
3538 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3539 if (intr_context->hooked) {
3540 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3541 free_irq(qdev->msi_x_entry[i].vector,
3544 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3548 ql_disable_msix(qdev);
3551 static int ql_request_irq(struct ql_adapter *qdev)
3555 struct pci_dev *pdev = qdev->pdev;
3556 struct intr_context *intr_context = &qdev->intr_context[0];
3558 ql_resolve_queues_to_irqs(qdev);
3560 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3561 atomic_set(&intr_context->irq_cnt, 0);
3562 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3563 status = request_irq(qdev->msi_x_entry[i].vector,
3564 intr_context->handler,
3569 netif_err(qdev, ifup, qdev->ndev,
3570 "Failed request for MSIX interrupt %d.\n",
3575 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3576 "trying msi or legacy interrupts.\n");
3577 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3578 "%s: irq = %d.\n", __func__, pdev->irq);
3579 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3580 "%s: context->name = %s.\n", __func__,
3581 intr_context->name);
3582 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3583 "%s: dev_id = 0x%p.\n", __func__,
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;
3594 netif_err(qdev, ifup, qdev->ndev,
3595 "Hooked intr %d, queue type %s, with name %s.\n",
3597 qdev->rx_ring[0].type == DEFAULT_Q ?
3599 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3600 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3601 intr_context->name);
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
3612 static int ql_start_rss(struct ql_adapter *qdev)
3614 static const u8 init_hash_seed[] = {
3615 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3616 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3617 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3618 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3619 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3621 struct ricb *ricb = &qdev->ricb;
3624 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3626 memset((void *)ricb, 0, sizeof(*ricb));
3628 ricb->base_cq = RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3631 ricb->mask = cpu_to_le16((u16)(0x3ff));
3634 * Fill out the Indirection Table.
3636 for (i = 0; i < 1024; i++)
3637 hash_id[i] = (i & (qdev->rss_ring_count - 1));
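	/* Example: with rss_ring_count = 4 the 1024-entry table repeats
	 * 0, 1, 2, 3, 0, 1, ... so hash buckets spread evenly across the
	 * RSS rings (the ring count is a power of two here). */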
3639 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3640 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3642 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3644 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3650 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3654 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3657 /* Clear all the entries in the routing table. */
3658 for (i = 0; i < 16; i++) {
3659 status = ql_set_routing_reg(qdev, i, 0, 0);
3661 netif_err(qdev, ifup, qdev->ndev,
3662 "Failed to init routing register for CAM packets.\n");
3666 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3670 /* Initialize the frame-to-queue routing. */
3671 static int ql_route_initialize(struct ql_adapter *qdev)
3675 /* Clear all the entries in the routing table. */
3676 status = ql_clear_routing_entries(qdev);
3680 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3684 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3685 RT_IDX_IP_CSUM_ERR, 1);
3687 netif_err(qdev, ifup, qdev->ndev,
3688 "Failed to init routing register "
3689 "for IP CSUM error packets.\n");
3692 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3693 RT_IDX_TU_CSUM_ERR, 1);
3695 netif_err(qdev, ifup, qdev->ndev,
3696 "Failed to init routing register "
3697 "for TCP/UDP CSUM error packets.\n");
3700 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3702 netif_err(qdev, ifup, qdev->ndev,
3703 "Failed to init routing register for broadcast packets.\n");
3706 /* If we have more than one inbound queue, then turn on RSS in the
3709 if (qdev->rss_ring_count > 1) {
3710 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3711 RT_IDX_RSS_MATCH, 1);
3713 netif_err(qdev, ifup, qdev->ndev,
3714 "Failed to init routing register for MATCH RSS packets.\n");
3719 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3722 netif_err(qdev, ifup, qdev->ndev,
3723 "Failed to init routing register for CAM packets.\n");
3725 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up, and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
3737 set = ql_read32(qdev, STS);
3738 set &= qdev->port_link_up;
3739 status = ql_set_mac_addr(qdev, set);
3741 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3745 status = ql_route_initialize(qdev);
3747 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);
3765 /* Set the default queue, and VLAN behavior. */
3766 value = NIC_RCV_CFG_DFQ;
3767 mask = NIC_RCV_CFG_DFQ_MASK;
3768 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3769 value |= NIC_RCV_CFG_RV;
3770 mask |= (NIC_RCV_CFG_RV << 16);
	}
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3774 /* Set the MPI interrupt to enabled. */
3775 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3777 /* Enable the function, set pagesize, enable error checking. */
3778 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3779 FSC_EC | FSC_VM_PAGE_4K;
3780 value |= SPLT_SETTING;
3782 /* Set/clear header splitting. */
3783 mask = FSC_VM_PAGESIZE_MASK |
3784 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3785 ql_write32(qdev, FSC, mask | value);
3787 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3789 /* Set RX packet routing to use port/pci function on which the
3790 * packet arrived on in addition to usual frame routing.
3791 * This is helpful on bonding where both interfaces can have
3792 * the same MAC address.
3794 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3795 /* Reroute all packets to our Interface.
3796 * They may have been routed to MPI firmware
3799 value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = (value << 16);

	/* Sticky reg needs clearing due to WOL. */
3804 ql_write32(qdev, MGMT_RCV_CFG, mask);
3805 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
	/* Default WOL is enabled on Mezz cards */
3808 if (qdev->pdev->subsystem_device == 0x0068 ||
3809 qdev->pdev->subsystem_device == 0x0180)
3810 qdev->wol = WAKE_MAGIC;
3812 /* Start up the rx queues. */
3813 for (i = 0; i < qdev->rx_ring_count; i++) {
3814 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3816 netif_err(qdev, ifup, qdev->ndev,
3817 "Failed to start rx ring[%d].\n", i);
3822 /* If there is more than one inbound completion queue
3823 * then download a RICB to configure RSS.
3825 if (qdev->rss_ring_count > 1) {
3826 status = ql_start_rss(qdev);
3828 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3833 /* Start up the tx queues. */
3834 for (i = 0; i < qdev->tx_ring_count; i++) {
3835 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3837 netif_err(qdev, ifup, qdev->ndev,
3838 "Failed to start tx ring[%d].\n", i);
3843 /* Initialize the port and set the max framesize. */
3844 status = qdev->nic_ops->port_initialize(qdev);
3846 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3848 /* Set up the MAC address and frame routing filter. */
3849 status = ql_cam_route_initialize(qdev);
3851 netif_err(qdev, ifup, qdev->ndev,
3852 "Failed to init CAM/Routing tables.\n");
3856 /* Start NAPI for the RSS queues. */
3857 for (i = 0; i < qdev->rss_ring_count; i++)
3858 napi_enable(&qdev->rx_ring[i].napi);
3863 /* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;
3870 /* Clear all the entries in the routing table. */
3871 status = ql_clear_routing_entries(qdev);
3873 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
	/* If the QL_ASIC_RECOVERY bit is set, skip the mailbox
	 * command and just clear the bit; otherwise go through the
	 * normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3881 /* Stop management traffic. */
3882 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3884 /* Wait for the NIC and MGMNT FIFOs to empty. */
3885 ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3889 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3891 end_jiffies = jiffies + usecs_to_jiffies(30);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));
3899 if (value & RST_FO_FR) {
3900 netif_err(qdev, ifdown, qdev->ndev,
3901 "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}
3905 /* Resume management traffic. */
3906 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3910 static void ql_display_dev_info(struct net_device *ndev)
3912 struct ql_adapter *qdev = netdev_priv(ndev);
3914 netif_info(qdev, probe, qdev->ndev,
3915 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3916 "XG Roll = %d, XG Rev = %d.\n",
3919 qdev->chip_rev_id & 0x0000000f,
3920 qdev->chip_rev_id >> 4 & 0x0000000f,
3921 qdev->chip_rev_id >> 8 & 0x0000000f,
3922 qdev->chip_rev_id >> 12 & 0x0000000f);
3923 netif_info(qdev, probe, qdev->ndev,
3924 "MAC address %pM\n", ndev->dev_addr);
3927 static int ql_wol(struct ql_adapter *qdev)
3930 u32 wol = MB_WOL_DISABLE;
3932 /* The CAM is still intact after a reset, but if we
3933 * are doing WOL, then we may need to program the
3934 * routing regs. We would also need to issue the mailbox
3935 * commands to instruct the MPI what to do per the ethtool
3939 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3940 WAKE_MCAST | WAKE_BCAST)) {
3941 netif_err(qdev, ifdown, qdev->ndev,
3942 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3947 if (qdev->wol & WAKE_MAGIC) {
3948 status = ql_mb_wol_set_magic(qdev, 1);
3950 netif_err(qdev, ifdown, qdev->ndev,
3951 "Failed to set magic packet on %s.\n",
3955 netif_info(qdev, drv, qdev->ndev,
3956 "Enabled magic packet successfully on %s.\n",
3959 wol |= MB_WOL_MAGIC_PKT;
3963 wol |= MB_WOL_MODE_ON;
3964 status = ql_mb_wol_mode(qdev, wol);
3965 netif_err(qdev, drv, qdev->ndev,
3966 "WOL %s (wol code 0x%x) on %s\n",
3967 (status == 0) ? "Successfully set" : "Failed",
3968 wol, qdev->ndev->name);
3974 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3977 /* Don't kill the reset worker thread if we
3978 * are in the process of recovery.
3980 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3981 cancel_delayed_work_sync(&qdev->asic_reset_work);
3982 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3983 cancel_delayed_work_sync(&qdev->mpi_work);
3984 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3985 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3986 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);
3997 for (i = 0; i < qdev->rss_ring_count; i++)
3998 napi_disable(&qdev->rx_ring[i].napi);
4000 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4002 ql_disable_interrupts(qdev);
4004 ql_tx_ring_clean(qdev);
4006 /* Call netif_napi_del() from common point.
4008 for (i = 0; i < qdev->rss_ring_count; i++)
4009 netif_napi_del(&qdev->rx_ring[i].napi);
4011 status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
4029 set_bit(QL_ADAPTER_UP, &qdev->flags);
4030 ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
4037 /* Restore rx mode. */
4038 clear_bit(QL_ALLMULTI, &qdev->flags);
4039 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4040 qlge_set_multicast_list(qdev->ndev);
4042 /* Restore vlan setting. */
4043 qlge_restore_vlan(qdev);
4045 ql_enable_interrupts(qdev);
4046 ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return err;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
4055 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4057 ql_free_mem_resources(qdev);
4061 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4065 if (ql_alloc_mem_resources(qdev)) {
4066 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4069 status = ql_request_irq(qdev);
4073 static int qlge_close(struct net_device *ndev)
4075 struct ql_adapter *qdev = netdev_priv(ndev);
4077 /* If we hit pci_channel_io_perm_failure
4078 * failure condition, then we already
4079 * brought the adapter down.
4081 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4082 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4083 clear_bit(QL_EEH_FATAL, &qdev->flags);
4088 * Wait for device to recover from a reset.
4089 * (Rarely happens, but possible.)
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
4093 ql_adapter_down(qdev);
4094 ql_release_adapter_resources(qdev);
4098 static int ql_configure_rings(struct ql_adapter *qdev)
4101 struct rx_ring *rx_ring;
4102 struct tx_ring *tx_ring;
4103 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4104 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4105 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4107 qdev->lbq_buf_order = get_order(lbq_buf_len);
4109 /* In a perfect world we have one RSS ring for each CPU
4110 * and each has it's own vector. To do that we ask for
4111 * cpu_cnt vectors. ql_enable_msix() will adjust the
4112 * vector count to what we actually get. We then
4113 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
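	/* Worked example (illustrative): 8 online CPUs but only 4 MSI-X
	 * vectors granted -> intr_count = rss_ring_count = 4,
	 * tx_ring_count = 8, rx_ring_count = 12 completion queues total. */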
4116 qdev->intr_count = cpu_cnt;
4117 ql_enable_msix(qdev);
4118 /* Adjust the RSS ring count to the actual vector count. */
4119 qdev->rss_ring_count = qdev->intr_count;
4120 qdev->tx_ring_count = cpu_cnt;
4121 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4123 for (i = 0; i < qdev->tx_ring_count; i++) {
4124 tx_ring = &qdev->tx_ring[i];
4125 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4126 tx_ring->qdev = qdev;
4128 tx_ring->wq_len = qdev->tx_ring_size;
4130 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4133 * The completion queue ID for the tx rings start
4134 * immediately after the rss rings.
4136 tx_ring->cq_id = qdev->rss_ring_count + i;
4139 for (i = 0; i < qdev->rx_ring_count; i++) {
4140 rx_ring = &qdev->rx_ring[i];
4141 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4142 rx_ring->qdev = qdev;
4144 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4145 if (i < qdev->rss_ring_count) {
4147 * Inbound (RSS) queues.
4149 rx_ring->cq_len = qdev->rx_ring_size;
4151 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4152 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4154 rx_ring->lbq_len * sizeof(__le64);
4155 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4156 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4158 rx_ring->sbq_len * sizeof(__le64);
4159 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4160 rx_ring->type = RX_Q;
4163 * Outbound queue handles outbound completions only.
4165 /* outbound cq is same size as tx_ring it services. */
4166 rx_ring->cq_len = qdev->tx_ring_size;
4168 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4169 rx_ring->lbq_len = 0;
4170 rx_ring->lbq_size = 0;
4171 rx_ring->lbq_buf_size = 0;
4172 rx_ring->sbq_len = 0;
4173 rx_ring->sbq_size = 0;
4174 rx_ring->sbq_buf_size = 0;
4175 rx_ring->type = TX_Q;
4181 static int qlge_open(struct net_device *ndev)
4184 struct ql_adapter *qdev = netdev_priv(ndev);
4186 err = ql_adapter_reset(qdev);
4190 err = ql_configure_rings(qdev);
4194 err = ql_get_adapter_resources(qdev);
4198 err = ql_adapter_up(qdev);
4205 ql_release_adapter_resources(qdev);
4209 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4211 struct rx_ring *rx_ring;
4215 /* Wait for an outstanding reset to complete. */
4216 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4219 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4220 netif_err(qdev, ifup, qdev->ndev,
4221 "Waiting for adapter UP...\n");
4226 netif_err(qdev, ifup, qdev->ndev,
4227 "Timed out waiting for adapter UP\n");
4232 status = ql_adapter_down(qdev);
4236 /* Get the new rx buffer size. */
4237 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4238 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4239 qdev->lbq_buf_order = get_order(lbq_buf_len);
4241 for (i = 0; i < qdev->rss_ring_count; i++) {
4242 rx_ring = &qdev->rx_ring[i];
4243 /* Set the new size. */
4244 rx_ring->lbq_buf_size = lbq_buf_len;
4247 status = ql_adapter_up(qdev);
4253 netif_alert(qdev, ifup, qdev->ndev,
4254 "Driver up/down cycle failed, closing device.\n");
4255 set_bit(QL_ADAPTER_UP, &qdev->flags);
4256 dev_close(qdev->ndev);
4260 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4262 struct ql_adapter *qdev = netdev_priv(ndev);
4265 if (ndev->mtu == 1500 && new_mtu == 9000) {
4266 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4267 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;
4272 queue_delayed_work(qdev->workqueue,
4273 &qdev->mpi_port_cfg_work, 3*HZ);
4275 ndev->mtu = new_mtu;
4277 if (!netif_running(qdev->ndev)) {
4281 status = ql_change_rx_buffers(qdev);
4283 netif_err(qdev, ifup, qdev->ndev,
4284 "Changing MTU failed.\n");
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
4293 struct ql_adapter *qdev = netdev_priv(ndev);
4294 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4295 struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
4300 pkts = mcast = dropped = errors = bytes = 0;
4301 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4302 pkts += rx_ring->rx_packets;
4303 bytes += rx_ring->rx_bytes;
4304 dropped += rx_ring->rx_dropped;
4305 errors += rx_ring->rx_errors;
4306 mcast += rx_ring->rx_multicast;
4308 ndev->stats.rx_packets = pkts;
4309 ndev->stats.rx_bytes = bytes;
4310 ndev->stats.rx_dropped = dropped;
4311 ndev->stats.rx_errors = errors;
4312 ndev->stats.multicast = mcast;
	/* Get TX stats. */
	pkts = errors = bytes = 0;
4316 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4317 pkts += tx_ring->tx_packets;
4318 bytes += tx_ring->tx_bytes;
4319 errors += tx_ring->tx_errors;
4321 ndev->stats.tx_packets = pkts;
4322 ndev->stats.tx_bytes = bytes;
4323 ndev->stats.tx_errors = errors;
4324 return &ndev->stats;
4327 static void qlge_set_multicast_list(struct net_device *ndev)
4329 struct ql_adapter *qdev = netdev_priv(ndev);
4330 struct netdev_hw_addr *ha;
	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
4340 if (ndev->flags & IFF_PROMISC) {
4341 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4342 if (ql_set_routing_reg
4343 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4344 netif_err(qdev, hw, qdev->ndev,
4345 "Failed to set promiscuous mode.\n");
4347 set_bit(QL_PROMISCUOUS, &qdev->flags);
4351 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4352 if (ql_set_routing_reg
4353 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4354 netif_err(qdev, hw, qdev->ndev,
4355 "Failed to clear promiscuous mode.\n");
4357 clear_bit(QL_PROMISCUOUS, &qdev->flags);
	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
4366 if ((ndev->flags & IFF_ALLMULTI) ||
4367 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4368 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4369 if (ql_set_routing_reg
4370 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4371 netif_err(qdev, hw, qdev->ndev,
4372 "Failed to set all-multi mode.\n");
4374 set_bit(QL_ALLMULTI, &qdev->flags);
4378 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4379 if (ql_set_routing_reg
4380 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4381 netif_err(qdev, hw, qdev->ndev,
4382 "Failed to clear all-multi mode.\n");
4384 clear_bit(QL_ALLMULTI, &qdev->flags);
4389 if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
4394 netdev_for_each_mc_addr(ha, ndev) {
4395 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4396 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4397 netif_err(qdev, hw, qdev->ndev,
4398 "Failed to loadmulticast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4405 if (ql_set_routing_reg
4406 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4407 netif_err(qdev, hw, qdev->ndev,
4408 "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
4417 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4419 struct ql_adapter *qdev = netdev_priv(ndev);
4420 struct sockaddr *addr = p;
4423 if (!is_valid_ether_addr(addr->sa_data))
4424 return -EADDRNOTAVAIL;
4425 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4426 /* Update local copy of current mac address. */
4427 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4429 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4432 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4433 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4435 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4436 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4440 static void qlge_tx_timeout(struct net_device *ndev)
4442 struct ql_adapter *qdev = netdev_priv(ndev);
4443 ql_queue_asic_error(qdev);
4446 static void ql_asic_reset_work(struct work_struct *work)
4448 struct ql_adapter *qdev =
4449 container_of(work, struct ql_adapter, asic_reset_work.work);
4452 status = ql_adapter_down(qdev);
4456 status = ql_adapter_up(qdev);
4460 /* Restore rx mode. */
4461 clear_bit(QL_ALLMULTI, &qdev->flags);
4462 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4463 qlge_set_multicast_list(qdev->ndev);
4468 netif_alert(qdev, ifup, qdev->ndev,
4469 "Driver up/down cycle failed, closing device\n");
4471 set_bit(QL_ADAPTER_UP, &qdev->flags);
4472 dev_close(qdev->ndev);
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
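
/*
 * Illustrative only: if MPI_TEST_FUNC_PORT_CFG reports nic_func1 = 0 and
 * nic_func2 = 1, the instance probed as function 1 records alt_func = 0,
 * and ql_get_board_info() below assigns it port 1 because its function
 * number is not the lowest of the pair.
 */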
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}
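
	/*
	 * Illustrative only: the 64-then-32-bit fallback above can be
	 * written more compactly with the generic DMA API. A sketch,
	 * assuming the same QL_DMA64 bookkeeping is still wanted:
	 *
	 *	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
	 *		set_bit(QL_DMA64, &qdev->flags);
	 *	else
	 *		err = dma_set_mask_and_coherent(&pdev->dev,
	 *						DMA_BIT_MASK(32));
	 */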
	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_out2;
	}
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
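
/*
 * Illustrative only: on kernels that have removed the (unsigned long data)
 * timer API, the deferrable timer above would be expressed with
 * timer_setup()/from_timer(). A minimal sketch, assuming the same body:
 *
 *	static void ql_timer(struct timer_list *t)
 *	{
 *		struct ql_adapter *qdev = from_timer(qdev, t, timer);
 *		...
 *	}
 *
 *	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
 *	mod_timer(&qdev->timer, jiffies + (5 * HZ));
 */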
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	/* MTU range: this driver only supports 1500 or 9000, so this only
	 * filters out values above or below, and we'll rely on
	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed.
	 */
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;
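
	/*
	 * Illustrative only: a minimal sketch of the 1500-or-9000 check
	 * that qlge_change_mtu (defined earlier in this file) is relied
	 * on to perform:
	 *
	 *	if (new_mtu != ETH_DATA_LEN && new_mtu != 9000)
	 *		return -EINVAL;
	 */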
	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* The timer was already stopped by the caller; flush any
	 * outstanding work before tearing the rings down.
	 */
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
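
/*
 * Recovery flow, for reference: on a bus error the PCI core invokes
 * .error_detected() first; if that returns PCI_ERS_RESULT_NEED_RESET it
 * resets the slot and calls .slot_reset(), and only after a successful
 * reset does it call .resume() to let the driver restart traffic.
 */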
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);
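
/*
 * For reference: module_pci_driver() expands to the usual module
 * init/exit boilerplate, equivalent to:
 *
 *	static int __init qlge_driver_init(void)
 *	{
 *		return pci_register_driver(&qlge_driver);
 *	}
 *	module_init(qlge_driver_init);
 *
 *	static void __exit qlge_driver_exit(void)
 *	{
 *		pci_unregister_driver(&qlge_driver);
 *	}
 *	module_exit(qlge_driver_exit);
 */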