/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2011 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <net/busy_poll.h>

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.3-1.534"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
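/* Worked reading of the macro above (illustrative, assuming the usual
 * 2048-byte tx_boundary): a 64KB TSO send spans at most 65536/2048 = 32
 * boundary-sized chunks, and each chunk may need two descriptors, hence
 * (65536 / 2048) * 2 = 64 send descriptors worst case.
 */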
#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
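/* Worked example (illustrative arithmetic only): with
 * MYRI10GE_ALLOC_ORDER == 0 and 4KB pages, MYRI10GE_ALLOC_SIZE is 4096,
 * so a 9014-byte jumbo frame needs 9014/4096 + 1 = 3 receive fragments.
 */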
#define MYRI10GE_MAX_SLICES 32

struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};

struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};

struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};

struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
};

struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};

struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running?         */
	int small_bytes;
	int big_bytes;
	int max_intr_slots;
	struct net_device *dev;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[ETH_ALEN];	/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	netdev_features_t features;
	int max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
};
static char *myri10ge_fw_unaligned = "/*(DEBLOBBED)*/";
static char *myri10ge_fw_aligned = "/*(DEBLOBBED)*/";
static char *myri10ge_fw_rss_unaligned = "/*(DEBLOBBED)*/";
static char *myri10ge_fw_rss_aligned = "/*(DEBLOBBED)*/";

/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, 0644);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0644);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, 0444);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, 0644);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, 0644);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, 0444);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, 0444);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, 0644);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, 0444);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, 0444);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, 0444);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, 0444);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, 0444);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, 0644);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, 0444);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, 0444);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, 0444);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
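/* Example usage of the parameters above (illustrative; values are
 * hypothetical and defaults apply when omitted):
 *
 *	modprobe myri10ge myri10ge_msi=1 myri10ge_max_slices=4 \
 *		myri10ge_intr_coal_delay=75
 */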
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
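/* Illustrative sketch (not from this file): splitting a 64-bit DMA
 * address into the firmware's paired 32-bit fields.  The value below is
 * hypothetical:
 *
 *	dma_addr_t bus = 0x0000000112345678ULL;
 *	u32 hi = MYRI10GE_HIGHPART_TO_U32(bus);	// 0x00000001 (0 on 32-bit)
 *	u32 lo = MYRI10GE_LOWPART_TO_U32(bus);	// 0x12345678
 *
 * Note that myri10ge_pio_copy() copies in 64-bit words, so its size
 * argument must be a multiple of 8.
 */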
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
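/* put_be32() deliberately uses __raw_writel(): the value is already
 * big-endian (__be32), so the byte swap that writel() would perform on
 * big-endian hosts must be avoided; ordering is handled explicitly by
 * callers via mb().
 */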
static void myri10ge_get_stats(struct net_device *dev,
			       struct rtnl_link_stats64 *stats);

static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most commands */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->data1 &
			    MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) != 0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
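/* Illustrative usage sketch (assumes a probed mgp; not a snippet from
 * this file): issue a firmware command and read its 32-bit result back
 * from cmd.data0:
 *
 *	struct myri10ge_cmd cmd = { 0 };
 *	int status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE,
 *				       &cmd, 0);
 *	if (status == 0)
 *		ring_bytes = cmd.data0;	// hypothetical local variable
 */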
/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}

static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
	mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0';

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = reject_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */

	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
		readb(mgp->sram);
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ".  For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	mb();
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}

static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}

static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int status, ctl;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	int status = 0;
	int len;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
		__free_page(dmatest_page);
		return -ENOMEM;
	}

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits of the return are the number of transfers completed.
	 * The lower 16 bits are the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		mgp->read_dma = 0;
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		mgp->write_dma = 0;
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		mgp->read_write_dma = 0;
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test_type == MXGEFW_DMA_TEST ?
			 "dma" : "unaligned", status);
	return status;
}
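/* Worked example of the benchmark math above (illustrative numbers
 * only): if the firmware reports 400 transfers (upper 16 bits of
 * cmd.data0) taking 640 half-microsecond ticks (lower 16 bits) with
 * len == 2048, the estimate is (400 * 2048 * 2) / 640 = 2560, i.e.
 * 2560 bytes per microsecond, which reads directly as MB/s.
 */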
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int i, status;
	size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
	unsigned long dca_tag_off;
#endif

	/* try to send a reset command to the card to see if it
	 * is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		return -ENXIO;
	}

	(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
	/*
	 * Use non-ndis mcp_slot (e.g., 4 bytes total,
	 * no Toeplitz hash value returned).  Older firmware will
	 * not understand this command, but will use the correct
	 * sized mcp_slot, so we ignore error returns
	 */
	cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
	(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);

	/* Now exchange information about interrupts  */

	bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
	cmd.data0 = (u32) bytes;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);

	/*
	 * Even though we already know how many slices are supported
	 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
	 * has magic side effects, and must be called after a reset.
	 * It must be called prior to calling any RSS related cmds,
	 * including assigning an interrupt queue for anything but
	 * slice 0.  It must also be called *after*
	 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
	 * the firmware to compute offsets.
	 */

	if (mgp->num_slices > 1) {

		/* ask the maximum number of slices it supports */
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to get number of slices\n");
		}

		/*
		 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
		 * to setting up the interrupt queue DMA
		 */

		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);

		/* Firmware older than 1.4.32 only supports multiple
		 * RX queues, so if we get an error, first retry using a
		 * single TX queue before giving up */
		if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
			netif_set_real_num_tx_queues(mgp->dev, 1);
			cmd.data0 = mgp->num_slices;
			cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
			status = myri10ge_send_cmd(mgp,
						   MXGEFW_CMD_ENABLE_RSS_QUEUES,
						   &cmd, 0);
		}

		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to set number of slices\n");

			return status;
		}
	}
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
		cmd.data2 = i;
		status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
					    &cmd, 0);
	}

	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		ss->irq_claim =
		    (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
	}
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
				    &cmd, 0);
	mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);

	status |= myri10ge_send_cmd
	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
	mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
		return status;
	}
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);

#ifdef CONFIG_MYRI10GE_DCA
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
	dca_tag_off = cmd.data0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (status == 0) {
			ss->dca_tag = (__iomem __be32 *)
			    (mgp->sram + dca_tag_off + 4 * i);
		} else {
			ss->dca_tag = NULL;
		}
	}
#endif				/* CONFIG_MYRI10GE_DCA */

	/* reset mcp/driver shared state back to 0 */

	mgp->link_changes = 0;
	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];

		memset(ss->rx_done.entry, 0, bytes);
		ss->tx.req = 0;
		ss->tx.done = 0;
		ss->tx.pkt_start = 0;
		ss->tx.pkt_done = 0;
		ss->rx_big.cnt = 0;
		ss->rx_small.cnt = 0;
		ss->rx_done.idx = 0;
		ss->rx_done.cnt = 0;
		ss->tx.wake_queue = 0;
		ss->tx.stop_queue = 0;
	}

	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
	myri10ge_change_pause(mgp, mgp->pause);
	myri10ge_set_multicast_list(mgp->dev);
	return status;
}
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret;
	u16 ctl;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);

	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
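/* Note on the addr_low dance above (behavior inferred from the code):
 * both copies publish the first descriptor with an all-ones low
 * address, which the NIC treats as not-yet-valid, so it cannot consume
 * the 8-descriptor block until the final put_be32() restores the real
 * low address of the first descriptor.
 */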
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	dma_addr_t bus;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}

			bus = pci_map_page(mgp->pdev, page, 0,
					   MYRI10GE_ALLOC_SIZE,
					   PCI_DMA_FROMDEVICE);
			if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
				__free_pages(page, MYRI10GE_ALLOC_ORDER);
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}
			rx->page = page;
			rx->page_offset = 0;
			rx->bus = bus;
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}

static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}
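/* Explanatory note on the test above: a page is shared between receive
 * buffers only when at least two buffers of this size fit in it, so the
 * "last user" is the buffer after which another one would no longer
 * fit; buffers of at least half the allocation size never share a page
 * and are unmapped immediately.
 */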
/*
 * GRO does not support acceleration of tagged vlan frames, and
 * this NIC does not support vlan tag offload, so we must pop
 * the tag ourselves to be able to achieve GRO performance that
 * is comparable to LRO.
 */

static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
	u8 *va;
	struct vlan_ethhdr *veh;
	struct skb_frag_struct *frag;
	__wsum vsum;

	va = addr;
	va += MXGEFW_PAD;
	veh = (struct vlan_ethhdr *)va;
	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q)) {
		/* fixup csum if needed */
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
			skb->csum = csum_sub(skb->csum, vsum);
		}
		/* pop tag */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
		memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
		skb->len -= VLAN_HLEN;
		skb->data_len -= VLAN_HLEN;
		frag = skb_shinfo(skb)->frags;
		frag->page_offset += VLAN_HLEN;
		skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
	}
}
#define MYRI10GE_HLEN 64	/* Bytes to copy from page to skb linear memory */

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct *rx_frags;
	struct myri10ge_rx_buf *rx;
	int i, idx, remainder, bytes;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	if (len <= mgp->small_bytes) {
		rx = &ss->rx_small;
		bytes = mgp->small_bytes;
	} else {
		rx = &ss->rx_big;
		bytes = mgp->big_bytes;
	}

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);

	skb = napi_get_frags(&ss->napi);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		for (i = 0, remainder = len; remainder > 0; i++) {
			myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
			put_page(rx->info[idx].page);
			rx->cnt++;
			idx = rx->cnt & rx->mask;
			remainder -= MYRI10GE_ALLOC_SIZE;
		}
		return 0;
	}
	rx_frags = skb_shinfo(skb)->frags;
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		skb_fill_page_desc(skb, i, rx->info[idx].page,
				   rx->info[idx].page_offset,
				   remainder < MYRI10GE_ALLOC_SIZE ?
				   remainder : MYRI10GE_ALLOC_SIZE);
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	/* remove padding */
	rx_frags[0].page_offset += MXGEFW_PAD;
	rx_frags[0].size -= MXGEFW_PAD;
	len -= MXGEFW_PAD;

	skb->len = len;
	skb->data_len = len;
	skb->truesize += len;
	if (dev->features & NETIF_F_RXCSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum;
	}
	myri10ge_vlan_rx(mgp->dev, va, skb);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	napi_gro_frags(&ss->napi);

	return 1;
}

static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */

	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1) &&
	    ss->mgp->running == MYRI10GE_ETH_RUNNING) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}

static int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;
	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		rx_ok = myri10ge_rx_done(ss, length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);

		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				netif_info(mgp, link, mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				netif_info(mgp, link, mgp->dev, "link %s\n",
					   (link_up == MXGEFW_LINK_MYRINET ?
					    "mismatch (Myrinet detected)" :
					    "down"));
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif
	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			netdev_warn(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}

static int
myri10ge_get_link_ksettings(struct net_device *netdev,
			    struct ethtool_link_ksettings *cmd)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	char *ptr;
	int i;

	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = mgp->product_code_string;
	if (ptr == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   mgp->product_code_string);
			return 0;
		}
	}
	if (*ptr == '2')
		ptr++;
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->base.port = PORT_FIBRE;
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	} else {
		cmd->base.port = PORT_OTHER;
	}

	return 0;
}
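/* Example (hypothetical product code): for "10G-PCIE-8B-S", the
 * character after the third dash is 'S', so the port is reported as
 * PORT_FIBRE (SFP+); any other character falls through to PORT_OTHER.
 */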
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}

static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}

static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	if (pause->autoneg != 0)
		return -EINVAL;
	return 0;
}

static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized",
};

#define MYRI10GE_NET_STATS_LEN      21
#define MYRI10GE_MAIN_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_slice_stats)
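/* Sanity note: MYRI10GE_NET_STATS_LEN (21) must match the number of
 * generic names at the head of myri10ge_gstrings_main_stats
 * ("rx_packets" through "tx_window_errors"), since
 * myri10ge_get_ethtool_stats() copies exactly that many u64 values out
 * of struct rtnl_link_stats64.
 */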
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *myri10ge_gstrings_main_stats,
		       sizeof(myri10ge_gstrings_main_stats));
		data += sizeof(myri10ge_gstrings_main_stats);
		for (i = 0; i < mgp->num_slices; i++) {
			memcpy(data, *myri10ge_gstrings_slice_stats,
			       sizeof(myri10ge_gstrings_slice_stats));
			data += sizeof(myri10ge_gstrings_slice_stats);
		}
		break;
	}
}

static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return MYRI10GE_MAIN_STATS_LEN +
		    mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	struct myri10ge_slice_state *ss;
	struct rtnl_link_stats64 link_stats;
	int slice;
	int i;

	/* force stats update */
	memset(&link_stats, 0, sizeof(link_stats));
	(void)myri10ge_get_stats(netdev, &link_stats);
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((u64 *)&link_stats)[i];

	data[i++] = (unsigned int)mgp->tx_boundary;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->msix_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
	data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
	data[i++] = (unsigned int)mgp->link_changes;

	/* firmware stats are useful only in the first slice */
	ss = &mgp->ss[0];
	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];
		data[i++] = 0;
		data[i++] = (unsigned int)ss->tx.pkt_start;
		data[i++] = (unsigned int)ss->tx.pkt_done;
		data[i++] = (unsigned int)ss->tx.req;
		data[i++] = (unsigned int)ss->tx.done;
		data[i++] = (unsigned int)ss->rx_small.cnt;
		data[i++] = (unsigned int)ss->rx_big.cnt;
		data[i++] = (unsigned int)ss->tx.wake_queue;
		data[i++] = (unsigned int)ss->tx.stop_queue;
		data[i++] = (unsigned int)ss->tx.linearized;
	}
}
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->msg_enable = value;
}

static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	return mgp->msg_enable;
}

/*
 * Use a low-level command to change the LED behavior. Rather than
 * blinking (which is the normal case), when identify is used, the
 * yellow LED turns solid.
 */
static int myri10ge_led(struct myri10ge_priv *mgp, int on)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	size_t hdr_off, pattern_off, hdr_len;
	u32 pattern = 0xfffffffe;

	/* find running firmware header */
	hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
	if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_off);
		return -EIO;
	}
	hdr_len = swab32(readl(mgp->sram + hdr_off +
			       offsetof(struct mcp_gen_header, header_length)));
	pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
	if (pattern_off >= (hdr_len + hdr_off)) {
		dev_info(dev, "Firmware does not support LED identification\n");
		return -EINVAL;
	}
	if (!on)
		pattern = swab32(readl(mgp->sram + pattern_off + 4));
	writel(swab32(pattern), mgp->sram + pattern_off);
	return 0;
}

static int
myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int rc;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		rc = myri10ge_led(mgp, 1);
		break;

	case ETHTOOL_ID_INACTIVE:
		rc = myri10ge_led(mgp, 0);
		break;

	default:
		rc = -EINVAL;
	}

	return rc;
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
	.get_msglevel = myri10ge_get_msglevel,
	.set_phys_id = myri10ge_phys_id,
	.get_link_ksettings = myri10ge_get_link_ksettings,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct myri10ge_cmd cmd;
	struct net_device *dev = mgp->dev;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, slice, status;
	size_t bytes;

	/* get ring sizes */
	slice = ss - mgp->ss;
	cmd.data0 = slice;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */

	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */

	bytes = tx_ring_entries * sizeof(*ss->tx.info);
	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/* Fill the receive rings */
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_big.fill_cnt = 0;
	ss->rx_small.fill_cnt = 0;
	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_small.watchdog_needed = 0;
	ss->rx_big.watchdog_needed = 0;
	if (mgp->small_bytes == 0) {
		ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
	} else {
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	}

	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
			   slice, ss->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
			   slice, ss->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

abort_with_rx_big_ring:
	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		int idx = i & ss->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	if (mgp->small_bytes == 0)
		ss->rx_small.fill_cnt = ss->rx_small.cnt;
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		int idx = i & ss->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	kfree(ss->rx_big.info);

abort_with_rx_small_info:
	kfree(ss->rx_small.info);

abort_with_tx_info:
	kfree(ss->tx.info);

abort_with_rx_big_shadow:
	kfree(ss->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(ss->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;

abort_with_nothing:
	return status;
}
2078 static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2080 struct myri10ge_priv *mgp = ss->mgp;
2081 struct sk_buff *skb;
2082 struct myri10ge_tx_buf *tx;
2085 /* If not allocated, skip it */
2086 if (ss->tx.req_list == NULL)
2089 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2090 idx = i & ss->rx_big.mask;
2091 if (i == ss->rx_big.fill_cnt - 1)
2092 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
2093 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2095 put_page(ss->rx_big.info[idx].page);
2098 if (mgp->small_bytes == 0)
2099 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2100 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2101 idx = i & ss->rx_small.mask;
2102 if (i == ss->rx_small.fill_cnt - 1)
2103 ss->rx_small.info[idx].page_offset =
2104 MYRI10GE_ALLOC_SIZE;
2105 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2106 mgp->small_bytes + MXGEFW_PAD);
2107 put_page(ss->rx_small.info[idx].page);
2110 while (tx->done != tx->req) {
2111 idx = tx->done & tx->mask;
2112 skb = tx->info[idx].skb;
2115 tx->info[idx].skb = NULL;
2117 len = dma_unmap_len(&tx->info[idx], len);
2118 dma_unmap_len_set(&tx->info[idx], len, 0);
2120 ss->stats.tx_dropped++;
2121 dev_kfree_skb_any(skb);
2123 pci_unmap_single(mgp->pdev,
2124 dma_unmap_addr(&tx->info[idx],
2129 pci_unmap_page(mgp->pdev,
2130 dma_unmap_addr(&tx->info[idx],
2135 kfree(ss->rx_big.info);
2137 kfree(ss->rx_small.info);
2141 kfree(ss->rx_big.shadow);
2143 kfree(ss->rx_small.shadow);
2145 kfree(ss->tx.req_bytes);
2146 ss->tx.req_bytes = NULL;
2147 ss->tx.req_list = NULL;
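/* Interrupt setup policy: try one MSI-X vector per slice first (the
 * only mode usable with multiple slices), fall back to a single MSI,
 * and finally to a shared legacy (xPIC) interrupt on slice 0. */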
2150 static int myri10ge_request_irq(struct myri10ge_priv *mgp)
2152 struct pci_dev *pdev = mgp->pdev;
2153 struct myri10ge_slice_state *ss;
2154 struct net_device *netdev = mgp->dev;
2158 mgp->msi_enabled = 0;
2159 mgp->msix_enabled = 0;
2162 if (mgp->num_slices > 1) {
2163 status = pci_enable_msix_range(pdev, mgp->msix_vectors,
2164 mgp->num_slices, mgp->num_slices);
2167 "Error %d setting up MSI-X\n", status);
2170 mgp->msix_enabled = 1;
2172 if (mgp->msix_enabled == 0) {
2173 status = pci_enable_msi(pdev);
2176 "Error %d setting up MSI; falling back to xPIC\n",
2179 mgp->msi_enabled = 1;
2183 if (mgp->msix_enabled) {
2184 for (i = 0; i < mgp->num_slices; i++) {
2186 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
2187 "%s:slice-%d", netdev->name, i);
2188 status = request_irq(mgp->msix_vectors[i].vector,
2189 myri10ge_intr, 0, ss->irq_desc,
2193 "slice %d failed to allocate IRQ\n", i);
2196 free_irq(mgp->msix_vectors[i].vector,
2200 pci_disable_msix(pdev);
2205 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
2206 mgp->dev->name, &mgp->ss[0]);
2208 dev_err(&pdev->dev, "failed to allocate IRQ\n");
2209 if (mgp->msi_enabled)
2210 pci_disable_msi(pdev);
2216 static void myri10ge_free_irq(struct myri10ge_priv *mgp)
2218 struct pci_dev *pdev = mgp->pdev;
2221 if (mgp->msix_enabled) {
2222 for (i = 0; i < mgp->num_slices; i++)
2223 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2225 free_irq(pdev->irq, &mgp->ss[0]);
2227 if (mgp->msi_enabled)
2228 pci_disable_msi(pdev);
2229 if (mgp->msix_enabled)
2230 pci_disable_msix(pdev);
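/* The GET_*_OFFSET commands below return ring locations relative to
 * the start of NIC SRAM; the driver then writes send and receive
 * requests directly into the NIC through the mapped lanai pointers. */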
2233 static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2235 struct myri10ge_cmd cmd;
2236 struct myri10ge_slice_state *ss;
2239 ss = &mgp->ss[slice];
2241 if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
2243 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
2245 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2246 (mgp->sram + cmd.data0);
2249 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
2251 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2252 (mgp->sram + cmd.data0);
2255 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
2256 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2257 (mgp->sram + cmd.data0);
2259 ss->tx.send_go = (__iomem __be32 *)
2260 (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
2261 ss->tx.send_stop = (__iomem __be32 *)
2262 (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
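/* Tell the firmware where to DMA its interrupt/stats block. For
 * SET_STATS_DMA_V2, cmd.data2 packs the block size into its low 16
 * bits and the slice number into bits 16 and up. Firmware old enough
 * to answer -ENOSYS only accepts the send_done_count address (the
 * OBSOLETE variant) and cannot filter multicast. */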
2267 static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
2269 struct myri10ge_cmd cmd;
2270 struct myri10ge_slice_state *ss;
2273 ss = &mgp->ss[slice];
2274 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2275 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2276 cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
2277 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
2278 if (status == -ENOSYS) {
2279 dma_addr_t bus = ss->fw_stats_bus;
2282 bus += offsetof(struct mcp_irq_data, send_done_count);
2283 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
2284 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
2285 status = myri10ge_send_cmd(mgp,
2286 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2288 /* Firmware cannot support multicast without STATS_DMA_V2 */
2289 mgp->fw_multicast_support = 0;
2291 mgp->fw_multicast_support = 1;
2296 static int myri10ge_open(struct net_device *dev)
2298 struct myri10ge_slice_state *ss;
2299 struct myri10ge_priv *mgp = netdev_priv(dev);
2300 struct myri10ge_cmd cmd;
2301 int i, status, big_pow2, slice;
2304 if (mgp->running != MYRI10GE_ETH_STOPPED)
2307 mgp->running = MYRI10GE_ETH_STARTING;
2308 status = myri10ge_reset(mgp);
2310 netdev_err(dev, "failed reset\n");
2311 goto abort_with_nothing;
2314 if (mgp->num_slices > 1) {
2315 cmd.data0 = mgp->num_slices;
2316 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
2317 if (mgp->dev->real_num_tx_queues > 1)
2318 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
2319 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2322 netdev_err(dev, "failed to set number of slices\n");
2323 goto abort_with_nothing;
2325 /* setup the indirection table */
2326 cmd.data0 = mgp->num_slices;
2327 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
2330 status |= myri10ge_send_cmd(mgp,
2331 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2334 netdev_err(dev, "failed to setup rss tables\n");
2335 goto abort_with_nothing;
2338 /* just enable an identity mapping */
2339 itable = mgp->sram + cmd.data0;
2340 for (i = 0; i < mgp->num_slices; i++)
2341 __raw_writeb(i, &itable[i]);
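/* The identity table maps RSS hash bucket i straight to slice i;
 * myri10ge_rss_hash (set just below) selects which header fields
 * feed the hash. */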
2344 cmd.data1 = myri10ge_rss_hash;
2345 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2348 netdev_err(dev, "failed to enable slices\n");
2349 goto abort_with_nothing;
2353 status = myri10ge_request_irq(mgp);
2355 goto abort_with_nothing;
2357 /* decide what small buffer size to use. For good TCP rx
2358 * performance, it is important to not receive 1514 byte
2359 * frames into jumbo buffers, as it confuses the socket buffer
2360 * accounting code, leading to drops and erratic performance.
2363 if (dev->mtu <= ETH_DATA_LEN)
2364 /* enough for a TCP header */
2365 mgp->small_bytes = (128 > SMP_CACHE_BYTES)
2366 ? (128 - MXGEFW_PAD)
2367 : (SMP_CACHE_BYTES - MXGEFW_PAD);
2369 /* enough for a vlan encapsulated ETH_DATA_LEN frame */
2370 mgp->small_bytes = VLAN_ETH_FRAME_LEN;
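/* Illustrative numbers, assuming MXGEFW_PAD == 2 and 64-byte
 * cachelines: a standard MTU yields 126-byte small buffers, while
 * jumbo MTUs use VLAN_ETH_FRAME_LEN (1518) so that ordinary frames
 * never land in the jumbo (big) buffers. */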
2372 /* Override the small buffer size? */
2373 if (myri10ge_small_bytes >= 0)
2374 mgp->small_bytes = myri10ge_small_bytes;
2376 /* Firmware needs the big buff size as a power of 2. Lie and
2377 * tell it the buffer is larger, because we only use 1
2378 * buffer/pkt, and the mtu will prevent overruns.
2380 big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2381 if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
2382 while (!is_power_of_2(big_pow2))
2384 mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2386 big_pow2 = MYRI10GE_ALLOC_SIZE;
2387 mgp->big_bytes = big_pow2;
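/* Worked example, assuming MXGEFW_PAD == 2: for a 1500-byte MTU,
 * 1500 + ETH_HLEN (14) + VLAN_HLEN (4) + 2 = 1520, which rounds up to
 * a big_pow2 of 2048, while big_bytes keeps the true 1520 since only
 * one buffer is ever used per packet. */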
2390 /* setup the per-slice data structures */
2391 for (slice = 0; slice < mgp->num_slices; slice++) {
2392 ss = &mgp->ss[slice];
2394 status = myri10ge_get_txrx(mgp, slice);
2396 netdev_err(dev, "failed to get ring sizes or locations\n");
2397 goto abort_with_rings;
2399 status = myri10ge_allocate_rings(ss);
2401 goto abort_with_rings;
2403 /* only firmware which supports multiple TX queues
2404 * supports setting up the tx stats on non-zero
2405 * slices */
2406 if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
2407 status = myri10ge_set_stats(mgp, slice);
2409 netdev_err(dev, "Couldn't set stats DMA\n");
2410 goto abort_with_rings;
2413 /* must happen prior to any irq */
2414 napi_enable(&(ss)->napi);
2417 /* now give firmware buffers sizes, and MTU */
2418 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
2419 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
2420 cmd.data0 = mgp->small_bytes;
2422 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
2423 cmd.data0 = big_pow2;
2425 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
2427 netdev_err(dev, "Couldn't set buffer sizes\n");
2428 goto abort_with_rings;
2432 * Set Linux style TSO mode; this is needed only on newer
2433 * firmware versions. Older versions default to Linux
2434 * style TSO. */
2437 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2438 if (status && status != -ENOSYS) {
2439 netdev_err(dev, "Couldn't set TSO mode\n");
2440 goto abort_with_rings;
2443 mgp->link_state = ~0U;
2444 mgp->rdma_tags_available = 15;
2446 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2448 netdev_err(dev, "Couldn't bring up link\n");
2449 goto abort_with_rings;
2452 mgp->running = MYRI10GE_ETH_RUNNING;
2453 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2454 add_timer(&mgp->watchdog_timer);
2455 netif_tx_wake_all_queues(dev);
2462 napi_disable(&mgp->ss[slice].napi);
2464 for (i = 0; i < mgp->num_slices; i++)
2465 myri10ge_free_rings(&mgp->ss[i]);
2467 myri10ge_free_irq(mgp);
2470 mgp->running = MYRI10GE_ETH_STOPPED;
2474 static int myri10ge_close(struct net_device *dev)
2476 struct myri10ge_priv *mgp = netdev_priv(dev);
2477 struct myri10ge_cmd cmd;
2478 int status, old_down_cnt;
2481 if (mgp->running != MYRI10GE_ETH_RUNNING)
2484 if (mgp->ss[0].tx.req_bytes == NULL)
2487 del_timer_sync(&mgp->watchdog_timer);
2488 mgp->running = MYRI10GE_ETH_STOPPING;
2489 for (i = 0; i < mgp->num_slices; i++)
2490 napi_disable(&mgp->ss[i].napi);
2492 netif_carrier_off(dev);
2494 netif_tx_stop_all_queues(dev);
2495 if (mgp->rebooted == 0) {
2496 old_down_cnt = mgp->down_cnt;
2499 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
2501 netdev_err(dev, "Couldn't bring down link\n");
2503 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
2505 if (old_down_cnt == mgp->down_cnt)
2506 netdev_err(dev, "never got down irq\n");
2508 netif_tx_disable(dev);
2509 myri10ge_free_irq(mgp);
2510 for (i = 0; i < mgp->num_slices; i++)
2511 myri10ge_free_rings(&mgp->ss[i]);
2513 mgp->running = MYRI10GE_ETH_STOPPED;
2517 /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2518 * backwards one at a time and handle ring wraps */
2521 myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
2522 struct mcp_kreq_ether_send *src, int cnt)
2524 int idx, starting_slot;
2525 starting_slot = tx->req;
2528 idx = (starting_slot + cnt) & tx->mask;
2529 myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
2535 * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2536 * at most 32 bytes at a time, so as to avoid involving the software
2537 * pio handler in the nic. We re-write the first segment's flags
2538 * to mark them valid only after writing the entire chain.
2542 myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
2546 struct mcp_kreq_ether_send __iomem *dstp, *dst;
2547 struct mcp_kreq_ether_send *srcp;
2550 idx = tx->req & tx->mask;
2552 last_flags = src->flags;
2555 dst = dstp = &tx->lanai[idx];
2558 if ((idx + cnt) < tx->mask) {
2559 for (i = 0; i < (cnt - 1); i += 2) {
2560 myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
2561 mb(); /* force write every 32 bytes */
2566 /* submit all but the first request, and ensure
2567 * that it is submitted below */
2568 myri10ge_submit_req_backwards(tx, src, cnt);
2572 /* submit the first request */
2573 myri10ge_pio_copy(dstp, srcp, sizeof(*src));
2574 mb(); /* barrier before setting valid flag */
2577 /* re-write the last 32-bits with the valid flags */
2578 src->flags = last_flags;
2579 put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
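/* The first descriptor's flags word was saved in last_flags before
 * the chain was copied and is only restored by the final put_be32()
 * above, so the NIC never interprets a partially written chain as a
 * valid request. */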
2584 static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
2585 struct myri10ge_tx_buf *tx, int idx)
2590 /* Free any DMA resources we've alloced and clear out the skb slot */
2591 last_idx = (idx + 1) & tx->mask;
2592 idx = tx->req & tx->mask;
2594 len = dma_unmap_len(&tx->info[idx], len);
2596 if (tx->info[idx].skb != NULL)
2597 pci_unmap_single(mgp->pdev,
2598 dma_unmap_addr(&tx->info[idx],
2602 pci_unmap_page(mgp->pdev,
2603 dma_unmap_addr(&tx->info[idx],
2606 dma_unmap_len_set(&tx->info[idx], len, 0);
2607 tx->info[idx].skb = NULL;
2609 idx = (idx + 1) & tx->mask;
2610 } while (idx != last_idx);
2614 * Transmit a packet. We need to split the packet so that a single
2615 * segment does not cross myri10ge->tx_boundary, so this makes segment
2616 * counting tricky. So rather than try to count segments up front, we
2617 * just give up if there are too few segments available to hold a
2618 * reasonably fragmented packet. If we run
2619 * out of segments while preparing a packet for DMA, we just linearize
2620 * it and try again. */
2623 static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
2624 struct net_device *dev)
2626 struct myri10ge_priv *mgp = netdev_priv(dev);
2627 struct myri10ge_slice_state *ss;
2628 struct mcp_kreq_ether_send *req;
2629 struct myri10ge_tx_buf *tx;
2630 struct skb_frag_struct *frag;
2631 struct netdev_queue *netdev_queue;
2634 __be32 high_swapped;
2636 int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
2637 u16 pseudo_hdr_offset, cksum_offset, queue;
2638 int cum_len, seglen, boundary, rdma_count;
2641 queue = skb_get_queue_mapping(skb);
2642 ss = &mgp->ss[queue];
2643 netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
2648 avail = tx->mask - 1 - (tx->req - tx->done);
2651 max_segments = MXGEFW_MAX_SEND_DESC;
2653 if (skb_is_gso(skb)) {
2654 mss = skb_shinfo(skb)->gso_size;
2655 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
2658 if (unlikely(avail < max_segments)) {
2659 /* we are out of transmit resources */
2661 netif_tx_stop_queue(netdev_queue);
2662 return NETDEV_TX_BUSY;
2665 /* Setup checksum offloading, if needed */
2667 pseudo_hdr_offset = 0;
2669 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
2670 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2671 cksum_offset = skb_checksum_start_offset(skb);
2672 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2673 /* If the headers are excessively large, then we must
2674 * fall back to a software checksum */
2675 if (unlikely(!mss && (cksum_offset > 255 ||
2676 pseudo_hdr_offset > 127))) {
2677 if (skb_checksum_help(skb))
2680 pseudo_hdr_offset = 0;
2682 odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
2683 flags |= MXGEFW_FLAGS_CKSUM;
2689 if (mss) { /* TSO */
2690 /* this removes any CKSUM flag from before */
2691 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
2693 /* negative cum_len signifies to the
2694 * send loop that we are still in the
2695 * header portion of the TSO packet.
2696 * TSO header can be at most 1KB long */
2697 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
2699 /* for IPv6 TSO, the checksum offset stores the
2700 * TCP header length, to save the firmware from
2701 * the need to parse the headers */
2702 if (skb_is_gso_v6(skb)) {
2703 cksum_offset = tcp_hdrlen(skb);
2704 /* Can only handle headers <= max_tso6 long */
2705 if (unlikely(-cum_len > mgp->max_tso6))
2706 return myri10ge_sw_tso(skb, dev);
2708 /* for TSO, pseudo_hdr_offset holds mss.
2709 * The firmware figures out where to put
2710 * the checksum by parsing the header. */
2711 pseudo_hdr_offset = mss;
2713 /* Mark small packets, and pad out tiny packets */
2714 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2715 flags |= MXGEFW_FLAGS_SMALL;
2717 /* pad frames to at least ETH_ZLEN bytes */
2718 if (eth_skb_pad(skb)) {
2719 /* The packet is gone, so we must
2720 * increment the tx_dropped counter */
2721 ss->stats.tx_dropped += 1;
2722 return NETDEV_TX_OK;
2726 /* map the skb for DMA */
2727 len = skb_headlen(skb);
2728 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2729 if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
2732 idx = tx->req & tx->mask;
2733 tx->info[idx].skb = skb;
2734 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2735 dma_unmap_len_set(&tx->info[idx], len, len);
2737 frag_cnt = skb_shinfo(skb)->nr_frags;
2742 /* "rdma_count" is the number of RDMAs belonging to the
2743 * current packet BEFORE the current send request. For
2744 * non-TSO packets, this is equal to "count".
2745 * For TSO packets, rdma_count needs to be reset
2746 * to 0 after a segment cut.
2748 * The rdma_count field of the send request is
2749 * the number of RDMAs of the packet starting at
2750 * that request. For TSO send requests with one or more cuts
2751 * in the middle, this is the number of RDMAs starting
2752 * after the last cut in the request. All previous
2753 * segments before the last cut implicitly have 1 RDMA.
2755 * Since the number of RDMAs is not known beforehand,
2756 * it must be filled-in retroactively - after each
2757 * segmentation cut or at the end of the entire packet.
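/* Concretely: (req - rdma_count) points back at the first descriptor
 * of the current run of RDMAs, so "(req - rdma_count)->rdma_count =
 * rdma_count + 1" in the loop below stamps the run length
 * retroactively whenever a TSO cut (or the end of the packet) is
 * reached. */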
2761 /* Break the SKB or Fragment up into pieces which
2762 * do not cross mgp->tx_boundary */
2763 low = MYRI10GE_LOWPART_TO_U32(bus);
2764 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
2769 if (unlikely(count == max_segments))
2770 goto abort_linearize;
2773 (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
2774 seglen = boundary - low;
2777 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2778 cum_len_next = cum_len + seglen;
2779 if (mss) { /* TSO */
2780 (req - rdma_count)->rdma_count = rdma_count + 1;
2782 if (likely(cum_len >= 0)) { /* payload */
2783 int next_is_first, chop;
2785 chop = (cum_len_next > mss);
2786 cum_len_next = cum_len_next % mss;
2787 next_is_first = (cum_len_next == 0);
2788 flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
2789 flags_next |= next_is_first *
2791 rdma_count |= -(chop | next_is_first);
2792 rdma_count += chop & ~next_is_first;
2793 } else if (likely(cum_len_next >= 0)) { /* header ends */
2799 small = (mss <= MXGEFW_SEND_SMALL_SIZE);
2800 flags_next = MXGEFW_FLAGS_TSO_PLD |
2801 MXGEFW_FLAGS_FIRST |
2802 (small * MXGEFW_FLAGS_SMALL);
2805 req->addr_high = high_swapped;
2806 req->addr_low = htonl(low);
2807 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
2808 req->pad = 0; /* complete solid 16-byte block; does this matter? */
2809 req->rdma_count = 1;
2810 req->length = htons(seglen);
2811 req->cksum_offset = cksum_offset;
2812 req->flags = flags | ((cum_len & 1) * odd_flag);
2816 cum_len = cum_len_next;
2821 if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
2822 if (unlikely(cksum_offset > seglen))
2823 cksum_offset -= seglen;
2828 if (frag_idx == frag_cnt)
2831 /* map next fragment for DMA */
2832 frag = &skb_shinfo(skb)->frags[frag_idx];
2834 len = skb_frag_size(frag);
2835 bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
2837 if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
2838 myri10ge_unmap_tx_dma(mgp, tx, idx);
2841 idx = (count + tx->req) & tx->mask;
2842 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2843 dma_unmap_len_set(&tx->info[idx], len, len);
2846 (req - rdma_count)->rdma_count = rdma_count;
2850 req->flags |= MXGEFW_FLAGS_TSO_LAST;
2851 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
2852 MXGEFW_FLAGS_FIRST)));
2853 idx = ((count - 1) + tx->req) & tx->mask;
2854 tx->info[idx].last = 1;
2855 myri10ge_submit_req(tx, tx->req_list, count);
2856 /* if using multiple tx queues, make sure the NIC polls
2857 * this queue */
2858 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
2859 tx->queue_active = 1;
2860 put_be32(htonl(1), tx->send_go);
2865 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
2867 netif_tx_stop_queue(netdev_queue);
2869 return NETDEV_TX_OK;
2872 myri10ge_unmap_tx_dma(mgp, tx, idx);
2874 if (skb_is_gso(skb)) {
2875 netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
2879 if (skb_linearize(skb))
2886 dev_kfree_skb_any(skb);
2887 ss->stats.tx_dropped += 1;
2888 return NETDEV_TX_OK;
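/* Software TSO fallback, used when an IPv6 TSO header is longer than
 * the firmware's max_tso6: segment the skb in software with TSO6
 * masked out of the feature set, then feed each resulting frame back
 * through myri10ge_xmit(). */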
2892 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
2893 struct net_device *dev)
2895 struct sk_buff *segs, *curr;
2896 struct myri10ge_priv *mgp = netdev_priv(dev);
2897 struct myri10ge_slice_state *ss;
2900 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
2908 status = myri10ge_xmit(curr, dev);
2910 dev_kfree_skb_any(curr);
2915 dev_kfree_skb_any(segs);
2920 dev_kfree_skb_any(skb);
2921 return NETDEV_TX_OK;
2924 ss = &mgp->ss[skb_get_queue_mapping(skb)];
2925 dev_kfree_skb_any(skb);
2926 ss->stats.tx_dropped += 1;
2927 return NETDEV_TX_OK;
2930 static void myri10ge_get_stats(struct net_device *dev,
2931 struct rtnl_link_stats64 *stats)
2933 const struct myri10ge_priv *mgp = netdev_priv(dev);
2934 const struct myri10ge_slice_netstats *slice_stats;
2937 for (i = 0; i < mgp->num_slices; i++) {
2938 slice_stats = &mgp->ss[i].stats;
2939 stats->rx_packets += slice_stats->rx_packets;
2940 stats->tx_packets += slice_stats->tx_packets;
2941 stats->rx_bytes += slice_stats->rx_bytes;
2942 stats->tx_bytes += slice_stats->tx_bytes;
2943 stats->rx_dropped += slice_stats->rx_dropped;
2944 stats->tx_dropped += slice_stats->tx_dropped;
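/* Rx filter programming below runs in this order: enable ALLMULTI
 * while the filter is rebuilt, flush every group, join each address
 * on the device's list, and only then re-enable filtering
 * (DISABLE_ALLMULTI), so wanted traffic is never dropped during the
 * update. */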
2948 static void myri10ge_set_multicast_list(struct net_device *dev)
2950 struct myri10ge_priv *mgp = netdev_priv(dev);
2951 struct myri10ge_cmd cmd;
2952 struct netdev_hw_addr *ha;
2953 __be32 data[2] = { 0, 0 };
2956 /* can be called from atomic contexts,
2957 * pass 1 to force atomicity in myri10ge_send_cmd() */
2958 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
2960 /* This firmware is known to not support multicast */
2961 if (!mgp->fw_multicast_support)
2964 /* Disable multicast filtering */
2966 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
2968 netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
2973 if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
2974 /* request to disable multicast filtering, so quit here */
2978 /* Flush the filters */
2980 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
2983 netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
2988 /* Walk the multicast list, and add each address */
2989 netdev_for_each_mc_addr(ha, dev) {
2990 memcpy(data, &ha->addr, ETH_ALEN);
2991 cmd.data0 = ntohl(data[0]);
2992 cmd.data1 = ntohl(data[1]);
2993 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
2997 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3002 /* Enable multicast filtering */
3003 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
3005 netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
3016 static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3018 struct sockaddr *sa = addr;
3019 struct myri10ge_priv *mgp = netdev_priv(dev);
3022 if (!is_valid_ether_addr(sa->sa_data))
3023 return -EADDRNOTAVAIL;
3025 status = myri10ge_update_mac_address(mgp, sa->sa_data);
3027 netdev_err(dev, "changing mac address failed with %d\n",
3032 /* change the dev structure */
3033 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
3037 static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3039 struct myri10ge_priv *mgp = netdev_priv(dev);
3042 netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
3044 /* if we change the mtu on an active device, we must
3045 * reset the device so the firmware sees the change */
3046 myri10ge_close(dev);
3056 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
3057 * Only do it if the bridge is a root port since we don't want to disturb
3058 * any other device, except if forced with myri10ge_ecrc_enable > 1.
3061 static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
3063 struct pci_dev *bridge = mgp->pdev->bus->self;
3064 struct device *dev = &mgp->pdev->dev;
3069 if (!myri10ge_ecrc_enable || !bridge)
3072 /* check that the bridge is a root port */
3073 if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
3074 if (myri10ge_ecrc_enable > 1) {
3075 struct pci_dev *prev_bridge, *old_bridge = bridge;
3077 /* Walk the hierarchy up to the root port
3078 * where ECRC has to be enabled */
3080 prev_bridge = bridge;
3081 bridge = bridge->bus->self;
3082 if (!bridge || prev_bridge == bridge) {
3084 "Failed to find root port"
3085 " to force ECRC\n");
3088 } while (pci_pcie_type(bridge) !=
3089 PCI_EXP_TYPE_ROOT_PORT);
3092 "Forcing ECRC on non-root port %s"
3093 " (enabling on root port %s)\n",
3094 pci_name(old_bridge), pci_name(bridge));
3097 "Not enabling ECRC on non-root port %s\n",
3103 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
3107 ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
3109 dev_err(dev, "failed reading ext-conf-space of %s\n",
3111 dev_err(dev, "\t pci=nommconf in use? "
3112 "or buggy/incomplete/absent ACPI MCFG attr?\n");
3115 if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
3118 err_cap |= PCI_ERR_CAP_ECRC_GENE;
3119 pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
3120 dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
3124 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
3125 * when the PCI-E Completion packets are aligned on an 8-byte
3126 * boundary. Some PCI-E chip sets always align Completion packets; on
3127 * the ones that do not, the alignment can be enforced by enabling
3128 * ECRC generation (if supported).
3130 * When PCI-E Completion packets are not aligned, it is actually more
3131 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
3135 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
3137 struct pci_dev *pdev = mgp->pdev;
3138 struct device *dev = &pdev->dev;
3141 mgp->tx_boundary = 4096;
3143 * Verify the max read request size was set to 4KB
3144 * before trying the test with 4KB.
3146 status = pcie_get_readrq(pdev);
3148 dev_err(dev, "Couldn't read max read req size: %d\n", status);
3151 if (status != 4096) {
3152 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
3153 mgp->tx_boundary = 2048;
3156 * load the optimized firmware (which assumes aligned PCIe
3157 * completions) in order to see if it works on this host.
3159 set_fw_name(mgp, myri10ge_fw_aligned, false);
3160 status = myri10ge_load_firmware(mgp, 1);
3166 * Enable ECRC if possible
3168 myri10ge_enable_ecrc(mgp);
3171 * Run a DMA test which watches for unaligned completions and
3172 * aborts on the first one seen.
3175 status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
3177 return; /* keep the aligned firmware */
3179 if (status != -E2BIG)
3180 dev_warn(dev, "DMA test failed: %d\n", status);
3181 if (status == -ENOSYS)
3182 dev_warn(dev, "Falling back to ethp! "
3183 "Please install up to date fw\n");
3185 /* fall back to using the unaligned firmware */
3186 mgp->tx_boundary = 2048;
3187 set_fw_name(mgp, myri10ge_fw_unaligned, false);
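/* Firmware selection order: a forced choice via
 * myri10ge_force_firmware wins; otherwise links narrower than x8 get
 * the aligned firmware outright, and full-width links run the DMA
 * probe in myri10ge_firmware_probe() to detect whether the chipset
 * delivers aligned completions. A module- or board-specific firmware
 * name, when set, overrides the result. */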
3190 static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3194 if (myri10ge_force_firmware == 0) {
3198 pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
3199 link_width = (lnk >> 4) & 0x3f;
3201 /* Check to see if Link is less than 8 or if the
3202 * upstream bridge is known to provide aligned
3203 * completions */
3204 if (link_width < 8) {
3205 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
3207 mgp->tx_boundary = 4096;
3208 set_fw_name(mgp, myri10ge_fw_aligned, false);
3210 myri10ge_firmware_probe(mgp);
3213 if (myri10ge_force_firmware == 1) {
3214 dev_info(&mgp->pdev->dev,
3215 "Assuming aligned completions (forced)\n");
3216 mgp->tx_boundary = 4096;
3217 set_fw_name(mgp, myri10ge_fw_aligned, false);
3219 dev_info(&mgp->pdev->dev,
3220 "Assuming unaligned completions (forced)\n");
3221 mgp->tx_boundary = 2048;
3222 set_fw_name(mgp, myri10ge_fw_unaligned, false);
3226 kernel_param_lock(THIS_MODULE);
3227 if (myri10ge_fw_name != NULL) {
3228 char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
3231 set_fw_name(mgp, fw_name, true);
3234 kernel_param_unlock(THIS_MODULE);
3236 if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
3237 myri10ge_fw_names[mgp->board_number] != NULL &&
3238 strlen(myri10ge_fw_names[mgp->board_number])) {
3239 set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
3243 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
3247 static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
3249 struct pci_dev *bridge = pdev->bus->self;
3256 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
3258 /* a sram parity error can cause a surprise link
3259 * down; since we expect and can recover from sram
3260 * parity errors, mask surprise link down events */
3261 pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
3263 pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
3268 static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
3270 struct myri10ge_priv *mgp;
3271 struct net_device *netdev;
3273 mgp = pci_get_drvdata(pdev);
3278 netif_device_detach(netdev);
3279 if (netif_running(netdev)) {
3280 netdev_info(netdev, "closing\n");
3282 myri10ge_close(netdev);
3285 myri10ge_dummy_rdma(mgp, 0);
3286 pci_save_state(pdev);
3287 pci_disable_device(pdev);
3289 return pci_set_power_state(pdev, pci_choose_state(pdev, state));
3292 static int myri10ge_resume(struct pci_dev *pdev)
3294 struct myri10ge_priv *mgp;
3295 struct net_device *netdev;
3299 mgp = pci_get_drvdata(pdev);
3303 pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */
3304 msleep(5); /* give card time to respond */
3305 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3306 if (vendor == 0xffff) {
3307 netdev_err(mgp->dev, "device disappeared!\n");
3311 pci_restore_state(pdev);
3313 status = pci_enable_device(pdev);
3315 dev_err(&pdev->dev, "failed to enable device\n");
3319 pci_set_master(pdev);
3321 myri10ge_reset(mgp);
3322 myri10ge_dummy_rdma(mgp, 1);
3324 /* Save configuration space to be restored if the
3325 * nic resets due to a parity error */
3326 pci_save_state(pdev);
3328 if (netif_running(netdev)) {
3330 status = myri10ge_open(netdev);
3333 goto abort_with_enabled;
3336 netif_device_attach(netdev);
3341 pci_disable_device(pdev);
3345 #endif /* CONFIG_PM */
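/* The reboot status is reached through the vendor-specific
 * capability: put the window into read32 mode via vs + 0x10, write
 * the target address (REBOOT_STATUS at 0xfffffff0) to vs + 0x18, and
 * read the data back from vs + 0x14. */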
3347 static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
3349 struct pci_dev *pdev = mgp->pdev;
3350 int vs = mgp->vendor_specific_offset;
3353 /* enter read32 mode */
3354 pci_write_config_byte(pdev, vs + 0x10, 0x3);
3356 /* read REBOOT_STATUS (0xfffffff0) */
3357 pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
3358 pci_read_config_dword(pdev, vs + 0x14, &reboot);
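/* A slice is flagged as stuck when it has outstanding sends
 * (tx.req != tx.done) but tx.done has not moved since the previous
 * watchdog pass; a rising pause-frame count instead implicates the
 * link partner rather than the NIC. */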
3363 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
3364 int *busy_slice_cnt, u32 rx_pause_cnt)
3366 struct myri10ge_priv *mgp = ss->mgp;
3367 int slice = ss - mgp->ss;
3369 if (ss->tx.req != ss->tx.done &&
3370 ss->tx.done == ss->watchdog_tx_done &&
3371 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3372 /* nic seems like it might be stuck.. */
3373 if (rx_pause_cnt != mgp->watchdog_pause) {
3374 if (net_ratelimit())
3375 netdev_warn(mgp->dev, "slice %d: TX paused, "
3376 "check link partner\n", slice);
3378 netdev_warn(mgp->dev,
3379 "slice %d: TX stuck %d %d %d %d %d %d\n",
3380 slice, ss->tx.queue_active, ss->tx.req,
3381 ss->tx.done, ss->tx.pkt_start,
3383 (int)ntohl(mgp->ss[slice].fw_stats->
3389 if (ss->watchdog_tx_done != ss->tx.done ||
3390 ss->watchdog_rx_done != ss->rx_done.cnt) {
3391 *busy_slice_cnt += 1;
3393 ss->watchdog_tx_done = ss->tx.done;
3394 ss->watchdog_tx_req = ss->tx.req;
3395 ss->watchdog_rx_done = ss->rx_done.cnt;
3399 * This watchdog is used to check whether the board has suffered
3400 * from a parity error and needs to be recovered.
3402 static void myri10ge_watchdog(struct work_struct *work)
3404 struct myri10ge_priv *mgp =
3405 container_of(work, struct myri10ge_priv, watchdog_work);
3406 struct myri10ge_slice_state *ss;
3407 u32 reboot, rx_pause_cnt;
3408 int status, rebooted;
3410 int reset_needed = 0;
3411 int busy_slice_cnt = 0;
3414 mgp->watchdog_resets++;
3415 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3417 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3418 /* Bus master DMA disabled? Check to see
3419 * if the card rebooted due to a parity error
3420 * For now, just report it */
3421 reboot = myri10ge_read_reboot(mgp);
3422 netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
3423 reboot, myri10ge_reset_recover ? "" : " not");
3424 if (myri10ge_reset_recover == 0)
3429 myri10ge_close(mgp->dev);
3430 myri10ge_reset_recover--;
3433 * A rebooted nic will come back with config space as
3434 * it was after power was applied to PCIe bus.
3435 * Attempt to restore config space which was saved
3436 * when the driver was loaded, or the last time the
3437 * nic was resumed from power saving mode.
3439 pci_restore_state(mgp->pdev);
3441 /* save state again for accounting reasons */
3442 pci_save_state(mgp->pdev);
3445 /* if we get back -1's from our slot, perhaps somebody
3446 * powered off our card. Don't try to reset it in
3447 * this case */
3448 if (cmd == 0xffff) {
3449 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3450 if (vendor == 0xffff) {
3451 netdev_err(mgp->dev, "device disappeared!\n");
3455 /* Perhaps it is a software error. See if stuck slice
3456 * has recovered, reset if not */
3457 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3458 for (i = 0; i < mgp->num_slices; i++) {
3461 myri10ge_check_slice(ss, &reset_needed,
3467 if (!reset_needed) {
3468 netdev_dbg(mgp->dev, "not resetting\n");
3472 netdev_err(mgp->dev, "device timeout, resetting\n");
3477 myri10ge_close(mgp->dev);
3479 status = myri10ge_load_firmware(mgp, 1);
3481 netdev_err(mgp->dev, "failed to load firmware\n");
3483 myri10ge_open(mgp->dev);
3488 * We use our own timer routine rather than relying upon
3489 * netdev->tx_timeout because we have a very large hardware transmit
3490 * queue. Due to the large queue, the netdev->tx_timeout function
3491 * cannot detect a NIC with a parity error in a timely fashion if the
3492 * NIC is lightly loaded.
3494 static void myri10ge_watchdog_timer(struct timer_list *t)
3496 struct myri10ge_priv *mgp;
3497 struct myri10ge_slice_state *ss;
3498 int i, reset_needed, busy_slice_cnt;
3502 mgp = from_timer(mgp, t, watchdog_timer);
3504 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3506 for (i = 0, reset_needed = 0;
3507 i < mgp->num_slices && reset_needed == 0; ++i) {
3510 if (ss->rx_small.watchdog_needed) {
3511 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3512 mgp->small_bytes + MXGEFW_PAD,
3514 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3515 myri10ge_fill_thresh)
3516 ss->rx_small.watchdog_needed = 0;
3518 if (ss->rx_big.watchdog_needed) {
3519 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3521 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3522 myri10ge_fill_thresh)
3523 ss->rx_big.watchdog_needed = 0;
3525 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
3528 /* if we've sent or received no traffic, poll the NIC to
3529 * ensure it is still there. Otherwise, we risk not noticing
3530 * an error in a timely fashion */
3531 if (busy_slice_cnt == 0) {
3532 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3533 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3537 mgp->watchdog_pause = rx_pause_cnt;
3540 schedule_work(&mgp->watchdog_work);
3543 mod_timer(&mgp->watchdog_timer,
3544 jiffies + myri10ge_watchdog_timeout * HZ);
3548 static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3550 struct myri10ge_slice_state *ss;
3551 struct pci_dev *pdev = mgp->pdev;
3555 if (mgp->ss == NULL)
3558 for (i = 0; i < mgp->num_slices; i++) {
3560 if (ss->rx_done.entry != NULL) {
3561 bytes = mgp->max_intr_slots *
3562 sizeof(*ss->rx_done.entry);
3563 dma_free_coherent(&pdev->dev, bytes,
3564 ss->rx_done.entry, ss->rx_done.bus);
3565 ss->rx_done.entry = NULL;
3567 if (ss->fw_stats != NULL) {
3568 bytes = sizeof(*ss->fw_stats);
3569 dma_free_coherent(&pdev->dev, bytes,
3570 ss->fw_stats, ss->fw_stats_bus);
3571 ss->fw_stats = NULL;
3573 napi_hash_del(&ss->napi);
3574 netif_napi_del(&ss->napi);
3576 /* Wait till napi structs are no longer used, and then free ss. */
3582 static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3584 struct myri10ge_slice_state *ss;
3585 struct pci_dev *pdev = mgp->pdev;
3589 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3590 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3591 if (mgp->ss == NULL) {
3595 for (i = 0; i < mgp->num_slices; i++) {
3597 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3598 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
3601 if (ss->rx_done.entry == NULL)
3603 bytes = sizeof(*ss->fw_stats);
3604 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3607 if (ss->fw_stats == NULL)
3611 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3612 myri10ge_napi_weight);
3616 myri10ge_free_slices(mgp);
3621 * This function determines the number of slices supported.
3622 * The number of slices is the minimum of the number of CPUs,
3623 * the number of MSI-X irqs supported, and the number of slices
3624 * supported by the firmware.
3626 static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3628 struct myri10ge_cmd cmd;
3629 struct pci_dev *pdev = mgp->pdev;
3632 int i, status, ncpus;
3634 mgp->num_slices = 1;
3635 ncpus = netif_get_num_default_rss_queues();
3637 if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
3638 (myri10ge_max_slices == -1 && ncpus < 2))
3641 /* try to load the slice aware rss firmware */
3642 old_fw = mgp->fw_name;
3643 old_allocated = mgp->fw_name_allocated;
3644 /* don't free old_fw if we override it. */
3645 mgp->fw_name_allocated = false;
3647 if (myri10ge_fw_name != NULL) {
3648 dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
3650 set_fw_name(mgp, myri10ge_fw_name, false);
3651 } else if (old_fw == myri10ge_fw_aligned)
3652 set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
3654 set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
3655 status = myri10ge_load_firmware(mgp, 0);
3657 dev_info(&pdev->dev, "Rss firmware not found\n");
3663 /* hit the board with a reset to ensure it is alive */
3664 memset(&cmd, 0, sizeof(cmd));
3665 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
3667 dev_err(&mgp->pdev->dev, "failed reset\n");
3671 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
3673 /* tell it the size of the interrupt queues */
3674 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
3675 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
3677 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
3681 /* ask the maximum number of slices it supports */
3682 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
3686 mgp->num_slices = cmd.data0;
3688 /* Only allow multiple slices if MSI-X is usable */
3689 if (!myri10ge_msi) {
3693 /* if the admin did not specify a limit to how many
3694 * slices we should use, cap it automatically to the
3695 * number of CPUs currently online */
3696 if (myri10ge_max_slices == -1)
3697 myri10ge_max_slices = ncpus;
3699 if (mgp->num_slices > myri10ge_max_slices)
3700 mgp->num_slices = myri10ge_max_slices;
3702 /* Now try to allocate as many MSI-X vectors as we have
3703 * slices. We give up on MSI-X if we can only get a single
3706 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3708 if (mgp->msix_vectors == NULL)
3710 for (i = 0; i < mgp->num_slices; i++) {
3711 mgp->msix_vectors[i].entry = i;
3714 while (mgp->num_slices > 1) {
3715 mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
3716 if (mgp->num_slices == 1)
3718 status = pci_enable_msix_range(pdev,
3725 pci_disable_msix(pdev);
3727 if (status == mgp->num_slices) {
3732 mgp->num_slices = status;
3737 if (mgp->msix_vectors != NULL) {
3738 kfree(mgp->msix_vectors);
3739 mgp->msix_vectors = NULL;
3743 mgp->num_slices = 1;
3744 set_fw_name(mgp, old_fw, old_allocated);
3745 myri10ge_load_firmware(mgp, 0);
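/* Note the negotiation above: if pci_enable_msix_range() cannot grant
 * one vector per slice, the slice count is reduced to what was
 * granted, rounded down to a power of two, and the request retried;
 * multi-slice mode is abandoned (and the original firmware restored)
 * once only one slice remains. */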
3748 static const struct net_device_ops myri10ge_netdev_ops = {
3749 .ndo_open = myri10ge_open,
3750 .ndo_stop = myri10ge_close,
3751 .ndo_start_xmit = myri10ge_xmit,
3752 .ndo_get_stats64 = myri10ge_get_stats,
3753 .ndo_validate_addr = eth_validate_addr,
3754 .ndo_change_mtu = myri10ge_change_mtu,
3755 .ndo_set_rx_mode = myri10ge_set_multicast_list,
3756 .ndo_set_mac_address = myri10ge_set_mac_address,
3759 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3761 struct net_device *netdev;
3762 struct myri10ge_priv *mgp;
3763 struct device *dev = &pdev->dev;
3765 int status = -ENXIO;
3767 unsigned hdr_offset, ss_offset;
3768 static int board_number;
3770 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3774 SET_NETDEV_DEV(netdev, &pdev->dev);
3776 mgp = netdev_priv(netdev);
3779 mgp->pause = myri10ge_flow_control;
3780 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
3781 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
3782 mgp->board_number = board_number;
3783 init_waitqueue_head(&mgp->down_wq);
3785 if (pci_enable_device(pdev)) {
3786 dev_err(&pdev->dev, "pci_enable_device call failed\n");
3788 goto abort_with_netdev;
3791 /* Find the vendor-specific cap so we can check
3792 * the reboot register later on */
3793 mgp->vendor_specific_offset
3794 = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
3796 /* Set our max read request to 4KB */
3797 status = pcie_set_readrq(pdev, 4096);
3799 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
3801 goto abort_with_enabled;
3804 myri10ge_mask_surprise_down(pdev);
3805 pci_set_master(pdev);
3807 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3811 "64-bit pci address mask was refused, "
3813 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3816 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3817 goto abort_with_enabled;
3819 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3820 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3821 &mgp->cmd_bus, GFP_KERNEL);
3824 goto abort_with_enabled;
3827 mgp->board_span = pci_resource_len(pdev, 0);
3828 mgp->iomem_base = pci_resource_start(pdev, 0);
3829 mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
3830 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
3831 if (mgp->sram == NULL) {
3832 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
3833 mgp->board_span, mgp->iomem_base);
3835 goto abort_with_mtrr;
3838 swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
3839 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
3840 mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
3841 if (mgp->sram_size > mgp->board_span ||
3842 mgp->sram_size <= MYRI10GE_FW_OFFSET) {
3844 "invalid sram_size %dB or board span %ldB\n",
3845 mgp->sram_size, mgp->board_span);
3847 goto abort_with_ioremap;
3849 memcpy_fromio(mgp->eeprom_strings,
3850 mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
3851 memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
3852 status = myri10ge_read_mac_addr(mgp);
3854 goto abort_with_ioremap;
3856 for (i = 0; i < ETH_ALEN; i++)
3857 netdev->dev_addr[i] = mgp->mac_addr[i];
3859 myri10ge_select_firmware(mgp);
3861 status = myri10ge_load_firmware(mgp, 1);
3863 dev_err(&pdev->dev, "failed to load firmware\n");
3864 goto abort_with_ioremap;
3866 myri10ge_probe_slices(mgp);
3867 status = myri10ge_alloc_slices(mgp);
3869 dev_err(&pdev->dev, "failed to alloc slice state\n");
3870 goto abort_with_firmware;
3872 netif_set_real_num_tx_queues(netdev, mgp->num_slices);
3873 netif_set_real_num_rx_queues(netdev, mgp->num_slices);
3874 status = myri10ge_reset(mgp);
3876 dev_err(&pdev->dev, "failed reset\n");
3877 goto abort_with_slices;
3879 #ifdef CONFIG_MYRI10GE_DCA
3880 myri10ge_setup_dca(mgp);
3882 pci_set_drvdata(pdev, mgp);
3884 /* MTU range: 68 - 9000 */
3885 netdev->min_mtu = ETH_MIN_MTU;
3886 netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
3888 if (myri10ge_initial_mtu > netdev->max_mtu)
3889 myri10ge_initial_mtu = netdev->max_mtu;
3890 if (myri10ge_initial_mtu < netdev->min_mtu)
3891 myri10ge_initial_mtu = netdev->min_mtu;
3893 netdev->mtu = myri10ge_initial_mtu;
3895 netdev->netdev_ops = &myri10ge_netdev_ops;
3896 netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
3898 /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
3899 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3901 netdev->features = netdev->hw_features;
3904 netdev->features |= NETIF_F_HIGHDMA;
3906 netdev->vlan_features |= mgp->features;
3907 if (mgp->fw_ver_tiny < 37)
3908 netdev->vlan_features &= ~NETIF_F_TSO6;
3909 if (mgp->fw_ver_tiny < 32)
3910 netdev->vlan_features &= ~NETIF_F_TSO;
3912 /* make sure we can get an irq, and that MSI can be
3913 * setup (if available). */
3914 status = myri10ge_request_irq(mgp);
3916 goto abort_with_slices;
3917 myri10ge_free_irq(mgp);
3919 /* Save configuration space to be restored if the
3920 * nic resets due to a parity error */
3921 pci_save_state(pdev);
3923 /* Setup the watchdog timer */
3924 timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
3926 netdev->ethtool_ops = &myri10ge_ethtool_ops;
3927 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
3928 status = register_netdev(netdev);
3930 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
3931 goto abort_with_state;
3933 if (mgp->msix_enabled)
3934 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
3935 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
3936 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
3938 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
3939 mgp->msi_enabled ? "MSI" : "xPIC",
3940 pdev->irq, mgp->tx_boundary, mgp->fw_name,
3941 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
3947 pci_restore_state(pdev);
3950 myri10ge_free_slices(mgp);
3952 abort_with_firmware:
3953 kfree(mgp->msix_vectors);
3954 myri10ge_dummy_rdma(mgp, 0);
3957 if (mgp->mac_addr_string != NULL)
3959 "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
3960 mgp->mac_addr_string, mgp->serial_number);
3964 arch_phys_wc_del(mgp->wc_cookie);
3965 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3966 mgp->cmd, mgp->cmd_bus);
3969 pci_disable_device(pdev);
3972 set_fw_name(mgp, NULL, false);
3973 free_netdev(netdev);
3980 * Does what is necessary to shutdown one Myrinet device. Called
3981 * once for each Myrinet card by the kernel when a module is
3982 * unloaded. */
3984 static void myri10ge_remove(struct pci_dev *pdev)
3986 struct myri10ge_priv *mgp;
3987 struct net_device *netdev;
3989 mgp = pci_get_drvdata(pdev);
3993 cancel_work_sync(&mgp->watchdog_work);
3995 unregister_netdev(netdev);
3997 #ifdef CONFIG_MYRI10GE_DCA
3998 myri10ge_teardown_dca(mgp);
4000 myri10ge_dummy_rdma(mgp, 0);
4002 /* avoid a memory leak */
4003 pci_restore_state(pdev);
4006 arch_phys_wc_del(mgp->wc_cookie);
4007 myri10ge_free_slices(mgp);
4008 kfree(mgp->msix_vectors);
4009 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4010 mgp->cmd, mgp->cmd_bus);
4012 set_fw_name(mgp, NULL, false);
4013 free_netdev(netdev);
4014 pci_disable_device(pdev);
4017 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4018 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4020 static const struct pci_device_id myri10ge_pci_tbl[] = {
4021 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4023 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
4027 MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
4029 static struct pci_driver myri10ge_driver = {
4031 .probe = myri10ge_probe,
4032 .remove = myri10ge_remove,
4033 .id_table = myri10ge_pci_tbl,
4035 .suspend = myri10ge_suspend,
4036 .resume = myri10ge_resume,
4040 #ifdef CONFIG_MYRI10GE_DCA
4042 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
4044 int err = driver_for_each_device(&myri10ge_driver.driver,
4046 myri10ge_notify_dca_device);
4053 static struct notifier_block myri10ge_dca_notifier = {
4054 .notifier_call = myri10ge_notify_dca,
4058 #endif /* CONFIG_MYRI10GE_DCA */
4060 static __init int myri10ge_init_module(void)
4062 pr_info("Version %s\n", MYRI10GE_VERSION_STR);
4064 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
4065 pr_err("Illegal rssh hash type %d, defaulting to source port\n",
4067 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
4069 #ifdef CONFIG_MYRI10GE_DCA
4070 dca_register_notify(&myri10ge_dca_notifier);
4072 if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
4073 myri10ge_max_slices = MYRI10GE_MAX_SLICES;
4075 return pci_register_driver(&myri10ge_driver);
4078 module_init(myri10ge_init_module);
4080 static __exit void myri10ge_cleanup_module(void)
4082 #ifdef CONFIG_MYRI10GE_DCA
4083 dca_unregister_notify(&myri10ge_dca_notifier);
4085 pci_unregister_driver(&myri10ge_driver);
4088 module_exit(myri10ge_cleanup_module);