1 /*************************************************************************
2 * myri10ge.c: Myricom Myri-10G Ethernet driver.
4 * Copyright (C) 2005 - 2011 Myricom, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 * If the eeprom on your board is not recent enough, you will need to get a
33 * newer firmware image at:
34 * http://www.myri.com/scs/download-Myri10GE.html
36 * Contact Information:
38 * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
39 *************************************************************************/
41 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43 #include <linux/tcp.h>
44 #include <linux/netdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/string.h>
47 #include <linux/module.h>
48 #include <linux/pci.h>
49 #include <linux/dma-mapping.h>
50 #include <linux/etherdevice.h>
51 #include <linux/if_ether.h>
52 #include <linux/if_vlan.h>
53 #include <linux/dca.h>
55 #include <linux/inet.h>
57 #include <linux/ethtool.h>
58 #include <linux/firmware.h>
59 #include <linux/delay.h>
60 #include <linux/timer.h>
61 #include <linux/vmalloc.h>
62 #include <linux/crc32.h>
63 #include <linux/moduleparam.h>
65 #include <linux/log2.h>
66 #include <linux/slab.h>
67 #include <linux/prefetch.h>
68 #include <net/checksum.h>
71 #include <asm/byteorder.h>
72 #include <asm/processor.h>
73 #include <net/busy_poll.h>
75 #include "myri10ge_mcp.h"
76 #include "myri10ge_mcp_gen_header.h"
78 #define MYRI10GE_VERSION_STR "1.5.3-1.534"
80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81 MODULE_AUTHOR("Maintainer: help@myri.com");
82 MODULE_VERSION(MYRI10GE_VERSION_STR);
83 MODULE_LICENSE("Dual BSD/GPL");
85 #define MYRI10GE_MAX_ETHER_MTU 9014
87 #define MYRI10GE_ETH_STOPPED 0
88 #define MYRI10GE_ETH_STOPPING 1
89 #define MYRI10GE_ETH_STARTING 2
90 #define MYRI10GE_ETH_RUNNING 3
91 #define MYRI10GE_ETH_OPEN_FAILED 4
93 #define MYRI10GE_EEPROM_STRINGS_SIZE 256
94 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
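/* Illustrative arithmetic for the limit above: a TSO send can cover up to
* 64KB of payload; with a 2048-byte tx_boundary that is 65536/2048 = 32
* pieces, and budgeting (roughly) two send descriptors per piece gives the
* 64-entry reservation. */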
96 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
97 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
99 #define MYRI10GE_ALLOC_ORDER 0
100 #define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
101 #define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
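/* Example: with MYRI10GE_ALLOC_ORDER == 0 on a 4KB-page system,
* MYRI10GE_ALLOC_SIZE is 4096, so a 9014-byte jumbo frame spans at most
* 9014/4096 + 1 = 3 receive fragments. */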
103 #define MYRI10GE_MAX_SLICES 32
105 struct myri10ge_rx_buffer_state {
108 DEFINE_DMA_UNMAP_ADDR(bus);
109 DEFINE_DMA_UNMAP_LEN(len);
112 struct myri10ge_tx_buffer_state {
115 DEFINE_DMA_UNMAP_ADDR(bus);
116 DEFINE_DMA_UNMAP_LEN(len);
119 struct myri10ge_cmd {
125 struct myri10ge_rx_buf {
126 struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
127 struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
128 struct myri10ge_rx_buffer_state *info;
135 int mask; /* number of rx slots -1 */
139 struct myri10ge_tx_buf {
140 struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
141 __be32 __iomem *send_go; /* "go" doorbell ptr */
142 __be32 __iomem *send_stop; /* "stop" doorbell ptr */
143 struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
145 struct myri10ge_tx_buffer_state *info;
146 int mask; /* number of transmit slots -1 */
147 int req ____cacheline_aligned; /* transmit slots submitted */
148 int pkt_start; /* packets started */
151 int done ____cacheline_aligned; /* transmit slots completed */
152 int pkt_done; /* packets completed */
157 struct myri10ge_rx_done {
158 struct mcp_slot *entry;
164 struct myri10ge_slice_netstats {
165 unsigned long rx_packets;
166 unsigned long tx_packets;
167 unsigned long rx_bytes;
168 unsigned long tx_bytes;
169 unsigned long rx_dropped;
170 unsigned long tx_dropped;
173 struct myri10ge_slice_state {
174 struct myri10ge_tx_buf tx; /* transmit ring */
175 struct myri10ge_rx_buf rx_small;
176 struct myri10ge_rx_buf rx_big;
177 struct myri10ge_rx_done rx_done;
178 struct net_device *dev;
179 struct napi_struct napi;
180 struct myri10ge_priv *mgp;
181 struct myri10ge_slice_netstats stats;
182 __be32 __iomem *irq_claim;
183 struct mcp_irq_data *fw_stats;
184 dma_addr_t fw_stats_bus;
185 int watchdog_tx_done;
187 int watchdog_rx_done;
189 #ifdef CONFIG_MYRI10GE_DCA
192 __be32 __iomem *dca_tag;
194 #ifdef CONFIG_NET_RX_BUSY_POLL
196 #define SLICE_STATE_IDLE 0
197 #define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
198 #define SLICE_STATE_POLL 2 /* poll owns this slice */
199 #define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
200 #define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
201 #define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
202 #define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
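/* Rough sketch of how the flags above are used by the lock helpers further
* down: NAPI and busy-poll each claim a slice by setting their state bit
* under ss->lock; if the slice is already SLICE_LOCKED, the loser records
* its *_YIELD bit and backs off rather than spinning. */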
204 unsigned long lock_napi_yield;
205 unsigned long lock_poll_yield;
206 unsigned long busy_poll_miss;
207 unsigned long busy_poll_cnt;
208 #endif /* CONFIG_NET_RX_BUSY_POLL */
212 struct myri10ge_priv {
213 struct myri10ge_slice_state *ss;
214 int tx_boundary; /* boundary transmits cannot cross */
216 int running; /* running? */
220 struct net_device *dev;
223 unsigned long board_span;
224 unsigned long iomem_base;
225 __be32 __iomem *irq_deassert;
226 char *mac_addr_string;
227 struct mcp_cmd_response *cmd;
229 struct pci_dev *pdev;
232 struct msix_entry *msix_vectors;
233 #ifdef CONFIG_MYRI10GE_DCA
238 unsigned int rdma_tags_available;
240 __be32 __iomem *intr_coal_delay_ptr;
243 wait_queue_head_t down_wq;
244 struct work_struct watchdog_work;
245 struct timer_list watchdog_timer;
249 bool fw_name_allocated;
251 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
252 char *product_code_string;
253 char fw_version[128];
257 int adopted_rx_filter_bug;
258 u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
259 unsigned long serial_number;
260 int vendor_specific_offset;
261 int fw_multicast_support;
269 unsigned int board_number;
273 static char *myri10ge_fw_unaligned = "/*(DEBLOBBED)*/";
274 static char *myri10ge_fw_aligned = "/*(DEBLOBBED)*/";
275 static char *myri10ge_fw_rss_unaligned = "/*(DEBLOBBED)*/";
276 static char *myri10ge_fw_rss_aligned = "/*(DEBLOBBED)*/";
279 /* Careful: must be accessed under kernel_param_lock() */
280 static char *myri10ge_fw_name = NULL;
281 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
282 MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
284 #define MYRI10GE_MAX_BOARDS 8
285 static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
286 {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
287 module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
291 static int myri10ge_ecrc_enable = 1;
292 module_param(myri10ge_ecrc_enable, int, S_IRUGO);
293 MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
295 static int myri10ge_small_bytes = -1; /* -1 == auto */
296 module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
297 MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
299 static int myri10ge_msi = 1; /* enable msi by default */
300 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
301 MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
303 static int myri10ge_intr_coal_delay = 75;
304 module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
305 MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
307 static int myri10ge_flow_control = 1;
308 module_param(myri10ge_flow_control, int, S_IRUGO);
309 MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
311 static int myri10ge_deassert_wait = 1;
312 module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
313 MODULE_PARM_DESC(myri10ge_deassert_wait,
314 "Wait when deasserting legacy interrupts");
316 static int myri10ge_force_firmware = 0;
317 module_param(myri10ge_force_firmware, int, S_IRUGO);
318 MODULE_PARM_DESC(myri10ge_force_firmware,
319 "Force firmware to assume aligned completions");
321 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
322 module_param(myri10ge_initial_mtu, int, S_IRUGO);
323 MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
325 static int myri10ge_napi_weight = 64;
326 module_param(myri10ge_napi_weight, int, S_IRUGO);
327 MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
329 static int myri10ge_watchdog_timeout = 1;
330 module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
331 MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
333 static int myri10ge_max_irq_loops = 1048576;
334 module_param(myri10ge_max_irq_loops, int, S_IRUGO);
335 MODULE_PARM_DESC(myri10ge_max_irq_loops,
336 "Set stuck legacy IRQ detection threshold");
338 #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
340 static int myri10ge_debug = -1; /* defaults above */
341 module_param(myri10ge_debug, int, 0);
342 MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
344 static int myri10ge_fill_thresh = 256;
345 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
346 MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
348 static int myri10ge_reset_recover = 1;
350 static int myri10ge_max_slices = 1;
351 module_param(myri10ge_max_slices, int, S_IRUGO);
352 MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
354 static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
355 module_param(myri10ge_rss_hash, int, S_IRUGO);
356 MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
358 static int myri10ge_dca = 1;
359 module_param(myri10ge_dca, int, S_IRUGO);
360 MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
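/* Example (illustrative values) of overriding the defaults above at module
* load time:
*   modprobe myri10ge myri10ge_msi=0 myri10ge_intr_coal_delay=25
* or the equivalent "options myri10ge ..." line in /etc/modprobe.d/. */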
362 #define MYRI10GE_FW_OFFSET 1024*1024
363 #define MYRI10GE_HIGHPART_TO_U32(X) \
364 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
365 #define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
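/* Example: a 64-bit bus address 0x0000001234567890 splits into a high part
* of 0x00000012 and a low part of 0x34567890; when dma_addr_t is 32 bits
* wide the high part is simply 0. */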
367 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
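/* Note: the copy above is issued as 64-bit writes, so callers are expected
* to pass sizes that are multiples of 8 (e.g. the mcp_cmd copy in
* myri10ge_send_cmd() below); any remainder bytes would be dropped by the
* size/8 division. */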
369 static void myri10ge_set_multicast_list(struct net_device *dev);
370 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
371 struct net_device *dev);
373 static inline void put_be32(__be32 val, __be32 __iomem * p)
375 __raw_writel((__force __u32) val, (__force void __iomem *)p);
378 static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
379 struct rtnl_link_stats64 *stats);
381 static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
383 if (mgp->fw_name_allocated)
386 mgp->fw_name_allocated = allocated;
390 myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
391 struct myri10ge_cmd *data, int atomic)
394 char buf_bytes[sizeof(*buf) + 8];
395 struct mcp_cmd_response *response = mgp->cmd;
396 char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
397 u32 dma_low, dma_high, result, value;
400 /* ensure buf is aligned to 8 bytes */
401 buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
403 buf->data0 = htonl(data->data0);
404 buf->data1 = htonl(data->data1);
405 buf->data2 = htonl(data->data2);
406 buf->cmd = htonl(cmd);
407 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
408 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
410 buf->response_addr.low = htonl(dma_low);
411 buf->response_addr.high = htonl(dma_high);
412 response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
414 myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
416 /* wait up to 15ms. Longest command is the DMA benchmark,
417 * which is capped at 5ms, but runs from a timeout handler
* that runs every 7.8ms. So a 15ms timeout leaves us with
* a 2.2ms margin.
*/
422 /* if atomic is set, do not sleep,
423 * and try to get the completion quickly
424 * (1ms will be enough for those commands) */
425 for (sleep_total = 0;
426 sleep_total < 1000 &&
427 response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
/* use msleep for most commands */
434 for (sleep_total = 0;
436 response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
441 result = ntohl(response->result);
442 value = ntohl(response->data);
443 if (result != MYRI10GE_NO_RESPONSE_RESULT) {
447 } else if (result == MXGEFW_CMD_UNKNOWN) {
449 } else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
451 } else if (result == MXGEFW_CMD_ERROR_RANGE &&
452 cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
454 data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
458 dev_err(&mgp->pdev->dev,
459 "command %d failed, result = %d\n",
465 dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
/*
* The eeprom strings on the lanaiX have the format
* MAC=x:x:x:x:x:x\0
* PC=text\0
* SN=number\0
* PT:ddd mmm xx xx:xx:xx xx\0
* PV:ddd mmm xx xx:xx:xx xx\0
*/
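/* Illustrative example only: the strings area is a run of NUL-terminated
* records such as "MAC=00:60:dd:47:ab:cd\0PC=...\0SN=123456\0", which
* myri10ge_read_mac_addr() below walks one record at a time. */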
477 static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
482 ptr = mgp->eeprom_strings;
483 limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
485 while (*ptr != '\0' && ptr < limit) {
486 if (memcmp(ptr, "MAC=", 4) == 0) {
488 mgp->mac_addr_string = ptr;
489 for (i = 0; i < 6; i++) {
490 if ((ptr + 2) > limit)
493 simple_strtoul(ptr, &ptr, 16);
497 if (memcmp(ptr, "PC=", 3) == 0) {
499 mgp->product_code_string = ptr;
501 if (memcmp((const void *)ptr, "SN=", 3) == 0) {
503 mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
505 while (ptr < limit && *ptr++) ;
511 dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
516 * Enable or disable periodic RDMAs from the host to make certain
517 * chipsets resend dropped PCIe messages
520 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
522 char __iomem *submit;
523 __be32 buf[16] __attribute__ ((__aligned__(8)));
524 u32 dma_low, dma_high;
527 /* clear confirmation addr */
531 /* send a rdma command to the PCIe engine, and wait for the
532 * response in the confirmation address. The firmware should
533 * write a -1 there to indicate it is alive and well
535 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
536 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
538 buf[0] = htonl(dma_high); /* confirm addr MSW */
539 buf[1] = htonl(dma_low); /* confirm addr LSW */
540 buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
541 buf[3] = htonl(dma_high); /* dummy addr MSW */
542 buf[4] = htonl(dma_low); /* dummy addr LSW */
543 buf[5] = htonl(enable); /* enable? */
545 submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
547 myri10ge_pio_copy(submit, &buf, sizeof(buf));
548 for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
550 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
551 dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
552 (enable ? "enable" : "disable"));
556 myri10ge_validate_firmware(struct myri10ge_priv *mgp,
557 struct mcp_gen_header *hdr)
559 struct device *dev = &mgp->pdev->dev;
561 /* check firmware type */
562 if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
563 dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
567 /* save firmware version for ethtool */
568 strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
569 mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0';
571 sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
572 &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
574 if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
575 mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
576 dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
577 dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
578 MXGEFW_VERSION_MINOR);
584 static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
586 unsigned crc, reread_crc;
587 const struct firmware *fw;
588 struct device *dev = &mgp->pdev->dev;
589 unsigned char *fw_readback;
590 struct mcp_gen_header *hdr;
595 if ((status = reject_firmware(&fw, mgp->fw_name, dev)) < 0) {
596 dev_err(dev, "Unable to load %s firmware image via hotplug\n",
599 goto abort_with_nothing;
604 if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
605 fw->size < MCP_HEADER_PTR_OFFSET + 4) {
606 dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
612 hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
613 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
614 dev_err(dev, "Bad firmware file\n");
618 hdr = (void *)(fw->data + hdr_offset);
620 status = myri10ge_validate_firmware(mgp, hdr);
624 crc = crc32(~0, fw->data, fw->size);
625 for (i = 0; i < fw->size; i += 256) {
626 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
628 min(256U, (unsigned)(fw->size - i)));
632 fw_readback = vmalloc(fw->size);
/* corruption checking is good for parity recovery and buggy chipsets */
638 memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
639 reread_crc = crc32(~0, fw_readback, fw->size);
641 if (crc != reread_crc) {
642 dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
643 (unsigned)fw->size, reread_crc, crc);
647 *size = (u32) fw->size;
650 release_firmware(fw);
656 static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
658 struct mcp_gen_header *hdr;
659 struct device *dev = &mgp->pdev->dev;
660 const size_t bytes = sizeof(struct mcp_gen_header);
664 /* find running firmware header */
665 hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
667 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
668 dev_err(dev, "Running firmware has bad header offset (%d)\n",
673 /* copy header of running firmware from SRAM to host memory to
674 * validate firmware */
675 hdr = kmalloc(bytes, GFP_KERNEL);
679 memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
680 status = myri10ge_validate_firmware(mgp, hdr);
683 /* check to see if adopted firmware has bug where adopting
684 * it will cause broadcasts to be filtered unless the NIC
685 * is kept in ALLMULTI mode */
686 if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
687 mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
688 mgp->adopted_rx_filter_bug = 1;
689 dev_warn(dev, "Adopting fw %d.%d.%d: "
690 "working around rx filter bug\n",
691 mgp->fw_ver_major, mgp->fw_ver_minor,
697 static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
699 struct myri10ge_cmd cmd;
702 /* probe for IPv6 TSO support */
703 mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
704 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
707 mgp->max_tso6 = cmd.data0;
708 mgp->features |= NETIF_F_TSO6;
711 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
713 dev_err(&mgp->pdev->dev,
714 "failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
718 mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
723 static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
725 char __iomem *submit;
726 __be32 buf[16] __attribute__ ((__aligned__(8)));
727 u32 dma_low, dma_high, size;
731 status = myri10ge_load_hotplug_firmware(mgp, &size);
735 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
737 /* Do not attempt to adopt firmware if there
742 status = myri10ge_adopt_running_firmware(mgp);
744 dev_err(&mgp->pdev->dev,
745 "failed to adopt running firmware\n");
748 dev_info(&mgp->pdev->dev,
749 "Successfully adopted running firmware\n");
750 if (mgp->tx_boundary == 4096) {
751 dev_warn(&mgp->pdev->dev,
"Using firmware currently running on NIC. For optimal\n");
dev_warn(&mgp->pdev->dev,
"performance consider loading optimized firmware\n");
dev_warn(&mgp->pdev->dev, "via hotplug\n");
760 set_fw_name(mgp, "adopted", false);
761 mgp->tx_boundary = 2048;
762 myri10ge_dummy_rdma(mgp, 1);
763 status = myri10ge_get_firmware_capabilities(mgp);
767 /* clear confirmation addr */
771 /* send a reload command to the bootstrap MCP, and wait for the
772 * response in the confirmation address. The firmware should
773 * write a -1 there to indicate it is alive and well
775 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
776 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
778 buf[0] = htonl(dma_high); /* confirm addr MSW */
779 buf[1] = htonl(dma_low); /* confirm addr LSW */
780 buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
782 /* FIX: All newest firmware should un-protect the bottom of
783 * the sram before handoff. However, the very first interfaces
784 * do not. Therefore the handoff copy must skip the first 8 bytes
786 buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
787 buf[4] = htonl(size - 8); /* length of code */
788 buf[5] = htonl(8); /* where to copy to */
789 buf[6] = htonl(0); /* where to jump to */
791 submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
793 myri10ge_pio_copy(submit, &buf, sizeof(buf));
798 while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
802 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
803 dev_err(&mgp->pdev->dev, "handoff failed\n");
806 myri10ge_dummy_rdma(mgp, 1);
807 status = myri10ge_get_firmware_capabilities(mgp);
812 static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
814 struct myri10ge_cmd cmd;
817 cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
818 | (addr[2] << 8) | addr[3]);
820 cmd.data1 = ((addr[4] << 8) | (addr[5]));
822 status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
826 static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
828 struct myri10ge_cmd cmd;
831 ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
832 status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
835 netdev_err(mgp->dev, "Failed to set flow control mode\n");
843 myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
845 struct myri10ge_cmd cmd;
848 ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
849 status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
851 netdev_err(mgp->dev, "Failed to set promisc mode\n");
854 static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
856 struct myri10ge_cmd cmd;
859 struct page *dmatest_page;
860 dma_addr_t dmatest_bus;
863 dmatest_page = alloc_page(GFP_KERNEL);
866 dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
868 if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
869 __free_page(dmatest_page);
873 /* Run a small DMA test.
874 * The magic multipliers to the length tell the firmware
875 * to do DMA read, write, or read+write tests. The
876 * results are returned in cmd.data0. The upper 16
* bits of the return value give the number of transfers completed;
* the lower 16 bits give the time, in 0.5us ticks, that the
* transfers took to complete.
*/
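/* Worked example with made-up numbers: if len == 2048 and the firmware
* returns 0x0400 transfers (upper 16 bits) in 0x0800 half-microsecond ticks
* (lower 16 bits), then (0x400 * 2048 * 2) / 0x800 = 2048, i.e. roughly
* 2 GB/s, which is what the read_dma_bw_MBs ethtool statistic reports. */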
882 len = mgp->tx_boundary;
884 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
885 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
886 cmd.data2 = len * 0x10000;
887 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
892 mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
893 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
894 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
895 cmd.data2 = len * 0x1;
896 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
901 mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
903 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
904 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
905 cmd.data2 = len * 0x10001;
906 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
911 mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
912 (cmd.data0 & 0xffff);
915 pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
916 put_page(dmatest_page);
918 if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
919 dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
925 #ifdef CONFIG_NET_RX_BUSY_POLL
926 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
928 spin_lock_init(&ss->lock);
929 ss->state = SLICE_STATE_IDLE;
932 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
935 spin_lock(&ss->lock);
936 if ((ss->state & SLICE_LOCKED)) {
937 WARN_ON((ss->state & SLICE_STATE_NAPI));
938 ss->state |= SLICE_STATE_NAPI_YIELD;
940 ss->lock_napi_yield++;
942 ss->state = SLICE_STATE_NAPI;
943 spin_unlock(&ss->lock);
947 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
949 spin_lock(&ss->lock);
950 WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
951 ss->state = SLICE_STATE_IDLE;
952 spin_unlock(&ss->lock);
955 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
958 spin_lock_bh(&ss->lock);
959 if ((ss->state & SLICE_LOCKED)) {
960 ss->state |= SLICE_STATE_POLL_YIELD;
962 ss->lock_poll_yield++;
964 ss->state |= SLICE_STATE_POLL;
965 spin_unlock_bh(&ss->lock);
969 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
971 spin_lock_bh(&ss->lock);
972 WARN_ON((ss->state & SLICE_STATE_NAPI));
973 ss->state = SLICE_STATE_IDLE;
974 spin_unlock_bh(&ss->lock);
977 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
979 WARN_ON(!(ss->state & SLICE_LOCKED));
980 return (ss->state & SLICE_USER_PEND);
982 #else /* CONFIG_NET_RX_BUSY_POLL */
983 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
987 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
992 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
996 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
1001 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
1005 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
1011 static int myri10ge_reset(struct myri10ge_priv *mgp)
1013 struct myri10ge_cmd cmd;
1014 struct myri10ge_slice_state *ss;
1017 #ifdef CONFIG_MYRI10GE_DCA
1018 unsigned long dca_tag_off;
1021 /* try to send a reset command to the card to see if it
1023 memset(&cmd, 0, sizeof(cmd));
1024 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
1026 dev_err(&mgp->pdev->dev, "failed reset\n");
1030 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
/*
* Use non-ndis mcp_slot (e.g., 4 bytes total,
* no toeplitz hash value returned). Older firmware will
* not understand this command, but will use the correct
* sized mcp_slot, so we ignore error returns.
*/
1037 cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
1038 (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
1040 /* Now exchange information about interrupts */
1042 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
1043 cmd.data0 = (u32) bytes;
1044 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
1047 * Even though we already know how many slices are supported
1048 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
1049 * has magic side effects, and must be called after a reset.
1050 * It must be called prior to calling any RSS related cmds,
1051 * including assigning an interrupt queue for anything but
1052 * slice 0. It must also be called *after*
1053 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
1054 * the firmware to compute offsets.
1057 if (mgp->num_slices > 1) {
1059 /* ask the maximum number of slices it supports */
1060 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
1063 dev_err(&mgp->pdev->dev,
1064 "failed to get number of slices\n");
1068 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
1069 * to setting up the interrupt queue DMA
1072 cmd.data0 = mgp->num_slices;
1073 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
1074 if (mgp->dev->real_num_tx_queues > 1)
1075 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
1076 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
1079 /* Firmware older than 1.4.32 only supports multiple
1080 * RX queues, so if we get an error, first retry using a
1081 * single TX queue before giving up */
1082 if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
1083 netif_set_real_num_tx_queues(mgp->dev, 1);
1084 cmd.data0 = mgp->num_slices;
1085 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
1086 status = myri10ge_send_cmd(mgp,
1087 MXGEFW_CMD_ENABLE_RSS_QUEUES,
1092 dev_err(&mgp->pdev->dev,
1093 "failed to set number of slices\n");
1098 for (i = 0; i < mgp->num_slices; i++) {
1100 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
1101 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
1103 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
1108 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
1109 for (i = 0; i < mgp->num_slices; i++) {
1112 (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
1114 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
1116 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
1118 status |= myri10ge_send_cmd
1119 (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
1120 mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
1122 dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
1125 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
1127 #ifdef CONFIG_MYRI10GE_DCA
1128 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
1129 dca_tag_off = cmd.data0;
1130 for (i = 0; i < mgp->num_slices; i++) {
1133 ss->dca_tag = (__iomem __be32 *)
1134 (mgp->sram + dca_tag_off + 4 * i);
1139 #endif /* CONFIG_MYRI10GE_DCA */
1141 /* reset mcp/driver shared state back to 0 */
1143 mgp->link_changes = 0;
1144 for (i = 0; i < mgp->num_slices; i++) {
1147 memset(ss->rx_done.entry, 0, bytes);
1150 ss->tx.pkt_start = 0;
1151 ss->tx.pkt_done = 0;
1153 ss->rx_small.cnt = 0;
1154 ss->rx_done.idx = 0;
1155 ss->rx_done.cnt = 0;
1156 ss->tx.wake_queue = 0;
1157 ss->tx.stop_queue = 0;
1160 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
1161 myri10ge_change_pause(mgp, mgp->pause);
1162 myri10ge_set_multicast_list(mgp->dev);
1166 #ifdef CONFIG_MYRI10GE_DCA
1167 static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
1172 pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);
1174 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
1176 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1178 pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
1184 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1186 ss->cached_dca_tag = tag;
1187 put_be32(htonl(tag), ss->dca_tag);
1190 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1192 int cpu = get_cpu();
1195 if (cpu != ss->cpu) {
1196 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
1197 if (ss->cached_dca_tag != tag)
1198 myri10ge_write_dca(ss, cpu, tag);
1204 static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1207 struct pci_dev *pdev = mgp->pdev;
1209 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1211 if (!myri10ge_dca) {
1212 dev_err(&pdev->dev, "dca disabled by administrator\n");
1215 err = dca_add_requester(&pdev->dev);
1219 "dca_add_requester() failed, err=%d\n", err);
1222 mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
1223 mgp->dca_enabled = 1;
1224 for (i = 0; i < mgp->num_slices; i++) {
1225 mgp->ss[i].cpu = -1;
1226 mgp->ss[i].cached_dca_tag = -1;
1227 myri10ge_update_dca(&mgp->ss[i]);
1231 static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
1233 struct pci_dev *pdev = mgp->pdev;
1235 if (!mgp->dca_enabled)
1237 mgp->dca_enabled = 0;
1238 if (mgp->relaxed_order)
1239 myri10ge_toggle_relaxed(pdev, 1);
1240 dca_remove_requester(&pdev->dev);
1243 static int myri10ge_notify_dca_device(struct device *dev, void *data)
1245 struct myri10ge_priv *mgp;
1246 unsigned long event;
1248 mgp = dev_get_drvdata(dev);
1249 event = *(unsigned long *)data;
1251 if (event == DCA_PROVIDER_ADD)
1252 myri10ge_setup_dca(mgp);
1253 else if (event == DCA_PROVIDER_REMOVE)
1254 myri10ge_teardown_dca(mgp);
1257 #endif /* CONFIG_MYRI10GE_DCA */
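/* Note on the descriptor handoff below: the first descriptor's real
* addr_low is written last. While the block of 8 is being PIO-copied its
* low address holds DMA_BIT_MASK(32), which presumably keeps the NIC from
* consuming a partially written group of receive descriptors. */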
1260 myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
1261 struct mcp_kreq_ether_recv *src)
1265 low = src->addr_low;
1266 src->addr_low = htonl(DMA_BIT_MASK(32));
1267 myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
1269 myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
1271 src->addr_low = low;
1272 put_be32(low, &dst->addr_low);
1276 static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
1278 struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
1280 if ((skb->protocol == htons(ETH_P_8021Q)) &&
1281 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
1282 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
1283 skb->csum = hw_csum;
1284 skb->ip_summed = CHECKSUM_COMPLETE;
1289 myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1290 int bytes, int watchdog)
1295 #if MYRI10GE_ALLOC_SIZE > 4096
1299 if (unlikely(rx->watchdog_needed && !watchdog))
1302 /* try to refill entire ring */
1303 while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
1304 idx = rx->fill_cnt & rx->mask;
1305 if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
1306 /* we can use part of previous page */
1309 /* we need a new page */
1311 alloc_pages(GFP_ATOMIC | __GFP_COMP,
1312 MYRI10GE_ALLOC_ORDER);
1313 if (unlikely(page == NULL)) {
1314 if (rx->fill_cnt - rx->cnt < 16)
1315 rx->watchdog_needed = 1;
1319 bus = pci_map_page(mgp->pdev, page, 0,
1320 MYRI10GE_ALLOC_SIZE,
1321 PCI_DMA_FROMDEVICE);
1322 if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
1323 __free_pages(page, MYRI10GE_ALLOC_ORDER);
1324 if (rx->fill_cnt - rx->cnt < 16)
1325 rx->watchdog_needed = 1;
1330 rx->page_offset = 0;
1334 rx->info[idx].page = rx->page;
1335 rx->info[idx].page_offset = rx->page_offset;
1336 /* note that this is the address of the start of the
1338 dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
1339 rx->shadow[idx].addr_low =
1340 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
1341 rx->shadow[idx].addr_high =
1342 htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
1344 /* start next packet on a cacheline boundary */
1345 rx->page_offset += SKB_DATA_ALIGN(bytes);
1347 #if MYRI10GE_ALLOC_SIZE > 4096
1348 /* don't cross a 4KB boundary */
1349 end_offset = rx->page_offset + bytes - 1;
1350 if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
1351 rx->page_offset = end_offset & ~4095;
1355 /* copy 8 descriptors to the firmware at a time */
1356 if ((idx & 7) == 7) {
1357 myri10ge_submit_8rx(&rx->lanai[idx - 7],
1358 &rx->shadow[idx - 7]);
1364 myri10ge_unmap_rx_page(struct pci_dev *pdev,
1365 struct myri10ge_rx_buffer_state *info, int bytes)
1367 /* unmap the recvd page if we're the only or last user of it */
1368 if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
1369 (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
1370 pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
1371 & ~(MYRI10GE_ALLOC_SIZE - 1)),
1372 MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
1377 * GRO does not support acceleration of tagged vlan frames, and
1378 * this NIC does not support vlan tag offload, so we must pop
1379 * the tag ourselves to be able to achieve GRO performance that
1380 * is comparable to LRO.
1384 myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1387 struct vlan_ethhdr *veh;
1388 struct skb_frag_struct *frag;
1393 veh = (struct vlan_ethhdr *)va;
1394 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
1395 NETIF_F_HW_VLAN_CTAG_RX &&
1396 veh->h_vlan_proto == htons(ETH_P_8021Q)) {
1397 /* fixup csum if needed */
1398 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1399 vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
1400 skb->csum = csum_sub(skb->csum, vsum);
1403 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
1404 memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
1405 skb->len -= VLAN_HLEN;
1406 skb->data_len -= VLAN_HLEN;
1407 frag = skb_shinfo(skb)->frags;
1408 frag->page_offset += VLAN_HLEN;
1409 skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
1413 #define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
1416 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1418 struct myri10ge_priv *mgp = ss->mgp;
1419 struct sk_buff *skb;
1420 struct skb_frag_struct *rx_frags;
1421 struct myri10ge_rx_buf *rx;
1422 int i, idx, remainder, bytes;
1423 struct pci_dev *pdev = mgp->pdev;
1424 struct net_device *dev = mgp->dev;
1428 if (len <= mgp->small_bytes) {
1430 bytes = mgp->small_bytes;
1433 bytes = mgp->big_bytes;
1437 idx = rx->cnt & rx->mask;
1438 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
1441 /* When busy polling in user context, allocate skb and copy headers to
* skb's linear memory ourselves. When not busy polling, use the napi
* gro frags path (napi_get_frags()/napi_gro_frags()) instead.
*/
1445 polling = myri10ge_ss_busy_polling(ss);
1447 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1449 skb = napi_get_frags(&ss->napi);
1450 if (unlikely(skb == NULL)) {
1451 ss->stats.rx_dropped++;
1452 for (i = 0, remainder = len; remainder > 0; i++) {
1453 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
1454 put_page(rx->info[idx].page);
1456 idx = rx->cnt & rx->mask;
1457 remainder -= MYRI10GE_ALLOC_SIZE;
1461 rx_frags = skb_shinfo(skb)->frags;
1462 /* Fill skb_frag_struct(s) with data from our receive */
1463 for (i = 0, remainder = len; remainder > 0; i++) {
1464 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
1465 skb_fill_page_desc(skb, i, rx->info[idx].page,
1466 rx->info[idx].page_offset,
1467 remainder < MYRI10GE_ALLOC_SIZE ?
1468 remainder : MYRI10GE_ALLOC_SIZE);
1470 idx = rx->cnt & rx->mask;
1471 remainder -= MYRI10GE_ALLOC_SIZE;
1474 /* remove padding */
1475 rx_frags[0].page_offset += MXGEFW_PAD;
1476 rx_frags[0].size -= MXGEFW_PAD;
1480 skb->data_len = len;
1481 skb->truesize += len;
1482 if (dev->features & NETIF_F_RXCSUM) {
1483 skb->ip_summed = CHECKSUM_COMPLETE;
1486 myri10ge_vlan_rx(mgp->dev, va, skb);
1487 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1492 /* myri10ge_vlan_rx might have moved the header, so compute
1493 * length and address again.
1495 hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
1496 va = page_address(skb_frag_page(&rx_frags[0])) +
1497 rx_frags[0].page_offset;
1498 /* Copy header into the skb linear memory */
1499 skb_copy_to_linear_data(skb, va, hlen);
1500 rx_frags[0].page_offset += hlen;
1501 rx_frags[0].size -= hlen;
1502 skb->data_len -= hlen;
1504 skb->protocol = eth_type_trans(skb, dev);
1505 skb_mark_napi_id(skb, &ss->napi);
1506 netif_receive_skb(skb);
1509 napi_gro_frags(&ss->napi);
1515 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1517 struct pci_dev *pdev = ss->mgp->pdev;
1518 struct myri10ge_tx_buf *tx = &ss->tx;
1519 struct netdev_queue *dev_queue;
1520 struct sk_buff *skb;
1523 while (tx->pkt_done != mcp_index) {
1524 idx = tx->done & tx->mask;
1525 skb = tx->info[idx].skb;
1528 tx->info[idx].skb = NULL;
1529 if (tx->info[idx].last) {
1531 tx->info[idx].last = 0;
1534 len = dma_unmap_len(&tx->info[idx], len);
1535 dma_unmap_len_set(&tx->info[idx], len, 0);
1537 ss->stats.tx_bytes += skb->len;
1538 ss->stats.tx_packets++;
1539 dev_kfree_skb_irq(skb);
1541 pci_unmap_single(pdev,
1542 dma_unmap_addr(&tx->info[idx],
1547 pci_unmap_page(pdev,
1548 dma_unmap_addr(&tx->info[idx],
1554 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
1556 * Make a minimal effort to prevent the NIC from polling an
1557 * idle tx queue. If we can't get the lock we leave the queue
1558 * active. In this case, either a thread was about to start
1559 * using the queue anyway, or we lost a race and the NIC will
* waste some of its resources polling an inactive queue for a
* while.
*/
1564 if ((ss->mgp->dev->real_num_tx_queues > 1) &&
1565 __netif_tx_trylock(dev_queue)) {
1566 if (tx->req == tx->done) {
1567 tx->queue_active = 0;
1568 put_be32(htonl(1), tx->send_stop);
1572 __netif_tx_unlock(dev_queue);
1575 /* start the queue if we've stopped it */
1576 if (netif_tx_queue_stopped(dev_queue) &&
1577 tx->req - tx->done < (tx->mask >> 1) &&
1578 ss->mgp->running == MYRI10GE_ETH_RUNNING) {
1580 netif_tx_wake_queue(dev_queue);
1585 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1587 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1588 struct myri10ge_priv *mgp = ss->mgp;
1589 unsigned long rx_bytes = 0;
1590 unsigned long rx_packets = 0;
1591 unsigned long rx_ok;
1592 int idx = rx_done->idx;
1593 int cnt = rx_done->cnt;
1598 while (rx_done->entry[idx].length != 0 && work_done < budget) {
1599 length = ntohs(rx_done->entry[idx].length);
1600 rx_done->entry[idx].length = 0;
1601 checksum = csum_unfold(rx_done->entry[idx].checksum);
1602 rx_ok = myri10ge_rx_done(ss, length, checksum);
1603 rx_packets += rx_ok;
1604 rx_bytes += rx_ok * (unsigned long)length;
1606 idx = cnt & (mgp->max_intr_slots - 1);
1611 ss->stats.rx_packets += rx_packets;
1612 ss->stats.rx_bytes += rx_bytes;
1614 /* restock receive rings if needed */
1615 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1616 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1617 mgp->small_bytes + MXGEFW_PAD, 0);
1618 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
1619 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1624 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1626 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1628 if (unlikely(stats->stats_updated)) {
1629 unsigned link_up = ntohl(stats->link_up);
1630 if (mgp->link_state != link_up) {
1631 mgp->link_state = link_up;
1633 if (mgp->link_state == MXGEFW_LINK_UP) {
1634 netif_info(mgp, link, mgp->dev, "link up\n");
1635 netif_carrier_on(mgp->dev);
1636 mgp->link_changes++;
1638 netif_info(mgp, link, mgp->dev, "link %s\n",
1639 (link_up == MXGEFW_LINK_MYRINET ?
1640 "mismatch (Myrinet detected)" :
1642 netif_carrier_off(mgp->dev);
1643 mgp->link_changes++;
1646 if (mgp->rdma_tags_available !=
1647 ntohl(stats->rdma_tags_available)) {
1648 mgp->rdma_tags_available =
1649 ntohl(stats->rdma_tags_available);
1650 netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
1651 mgp->rdma_tags_available);
1653 mgp->down_cnt += stats->link_down;
1654 if (stats->link_down)
1655 wake_up(&mgp->down_wq);
1659 static int myri10ge_poll(struct napi_struct *napi, int budget)
1661 struct myri10ge_slice_state *ss =
1662 container_of(napi, struct myri10ge_slice_state, napi);
1665 #ifdef CONFIG_MYRI10GE_DCA
1666 if (ss->mgp->dca_enabled)
1667 myri10ge_update_dca(ss);
1669 /* Try later if the busy_poll handler is running. */
1670 if (!myri10ge_ss_lock_napi(ss))
1673 /* process as many rx events as NAPI will allow */
1674 work_done = myri10ge_clean_rx_done(ss, budget);
1676 myri10ge_ss_unlock_napi(ss);
1677 if (work_done < budget) {
1678 napi_complete(napi);
1679 put_be32(htonl(3), ss->irq_claim);
1684 #ifdef CONFIG_NET_RX_BUSY_POLL
1685 static int myri10ge_busy_poll(struct napi_struct *napi)
1687 struct myri10ge_slice_state *ss =
1688 container_of(napi, struct myri10ge_slice_state, napi);
1689 struct myri10ge_priv *mgp = ss->mgp;
1692 /* Poll only when the link is up */
1693 if (mgp->link_state != MXGEFW_LINK_UP)
1694 return LL_FLUSH_FAILED;
1696 if (!myri10ge_ss_lock_poll(ss))
1697 return LL_FLUSH_BUSY;
1699 /* Process a small number of packets */
1700 work_done = myri10ge_clean_rx_done(ss, 4);
1702 ss->busy_poll_cnt += work_done;
1704 ss->busy_poll_miss++;
1706 myri10ge_ss_unlock_poll(ss);
1710 #endif /* CONFIG_NET_RX_BUSY_POLL */
1712 static irqreturn_t myri10ge_intr(int irq, void *arg)
1714 struct myri10ge_slice_state *ss = arg;
1715 struct myri10ge_priv *mgp = ss->mgp;
1716 struct mcp_irq_data *stats = ss->fw_stats;
1717 struct myri10ge_tx_buf *tx = &ss->tx;
1718 u32 send_done_count;
1721 /* an interrupt on a non-zero receive-only slice is implicitly
1722 * valid since MSI-X irqs are not shared */
1723 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1724 napi_schedule(&ss->napi);
1728 /* make sure it is our IRQ, and that the DMA has finished */
1729 if (unlikely(!stats->valid))
1732 /* low bit indicates receives are present, so schedule
1733 * napi poll handler */
1734 if (stats->valid & 1)
1735 napi_schedule(&ss->napi);
1737 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1738 put_be32(0, mgp->irq_deassert);
1739 if (!myri10ge_deassert_wait)
1745 /* Wait for IRQ line to go low, if using INTx */
1749 /* check for transmit completes and receives */
1750 send_done_count = ntohl(stats->send_done_count);
1751 if (send_done_count != tx->pkt_done)
1752 myri10ge_tx_done(ss, (int)send_done_count);
1753 if (unlikely(i > myri10ge_max_irq_loops)) {
1754 netdev_warn(mgp->dev, "irq stuck?\n");
1756 schedule_work(&mgp->watchdog_work);
1758 if (likely(stats->valid == 0))
1764 /* Only slice 0 updates stats */
1766 myri10ge_check_statblock(mgp);
1768 put_be32(htonl(3), ss->irq_claim + 1);
1773 myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1775 struct myri10ge_priv *mgp = netdev_priv(netdev);
1779 cmd->autoneg = AUTONEG_DISABLE;
1780 ethtool_cmd_speed_set(cmd, SPEED_10000);
1781 cmd->duplex = DUPLEX_FULL;
/*
* parse the product code to determine the interface type
* (CX4, XFP, Quad Ribbon Fiber) by looking at the character
* after the 3rd dash in the driver's cached copy of the
* EEPROM's product code string.
*/
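/* Illustrative example: for a product code such as "10G-PCIE-8B-S" the
* character after the third dash is 'S', so the port is reported as fibre
* with an external (SFP+) transceiver. */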
1789 ptr = mgp->product_code_string;
1791 netdev_err(netdev, "Missing product code\n");
1794 for (i = 0; i < 3; i++, ptr++) {
1795 ptr = strchr(ptr, '-');
1797 netdev_err(netdev, "Invalid product code %s\n",
1798 mgp->product_code_string);
1804 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1805 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1806 cmd->port = PORT_FIBRE;
1807 cmd->supported |= SUPPORTED_FIBRE;
1808 cmd->advertising |= ADVERTISED_FIBRE;
1810 cmd->port = PORT_OTHER;
1812 if (*ptr == 'R' || *ptr == 'S')
1813 cmd->transceiver = XCVR_EXTERNAL;
1815 cmd->transceiver = XCVR_INTERNAL;
1821 myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
1823 struct myri10ge_priv *mgp = netdev_priv(netdev);
1825 strlcpy(info->driver, "myri10ge", sizeof(info->driver));
1826 strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
1827 strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
1828 strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
1832 myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
1834 struct myri10ge_priv *mgp = netdev_priv(netdev);
1836 coal->rx_coalesce_usecs = mgp->intr_coal_delay;
1841 myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
1843 struct myri10ge_priv *mgp = netdev_priv(netdev);
1845 mgp->intr_coal_delay = coal->rx_coalesce_usecs;
1846 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
1851 myri10ge_get_pauseparam(struct net_device *netdev,
1852 struct ethtool_pauseparam *pause)
1854 struct myri10ge_priv *mgp = netdev_priv(netdev);
1857 pause->rx_pause = mgp->pause;
1858 pause->tx_pause = mgp->pause;
1862 myri10ge_set_pauseparam(struct net_device *netdev,
1863 struct ethtool_pauseparam *pause)
1865 struct myri10ge_priv *mgp = netdev_priv(netdev);
1867 if (pause->tx_pause != mgp->pause)
1868 return myri10ge_change_pause(mgp, pause->tx_pause);
1869 if (pause->rx_pause != mgp->pause)
1870 return myri10ge_change_pause(mgp, pause->rx_pause);
1871 if (pause->autoneg != 0)
1877 myri10ge_get_ringparam(struct net_device *netdev,
1878 struct ethtool_ringparam *ring)
1880 struct myri10ge_priv *mgp = netdev_priv(netdev);
1882 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1883 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1884 ring->rx_jumbo_max_pending = 0;
1885 ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
1886 ring->rx_mini_pending = ring->rx_mini_max_pending;
1887 ring->rx_pending = ring->rx_max_pending;
1888 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
1889 ring->tx_pending = ring->tx_max_pending;
1892 static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1893 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
1894 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
1895 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
1896 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
1897 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1898 "tx_heartbeat_errors", "tx_window_errors",
1899 /* device-specific stats */
1900 "tx_boundary", "irq", "MSI", "MSIX",
1901 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1902 "serial_number", "watchdog_resets",
1903 #ifdef CONFIG_MYRI10GE_DCA
1904 "dca_capable_firmware", "dca_device_present",
1906 "link_changes", "link_up", "dropped_link_overflow",
1907 "dropped_link_error_or_filtered",
1908 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
1909 "dropped_unicast_filtered", "dropped_multicast_filtered",
1910 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
1911 "dropped_no_big_buffer"
1914 static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1915 "----------- slice ---------",
1916 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1917 "rx_small_cnt", "rx_big_cnt",
1918 "wake_queue", "stop_queue", "tx_linearized",
1919 #ifdef CONFIG_NET_RX_BUSY_POLL
1920 "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
1925 #define MYRI10GE_NET_STATS_LEN 21
1926 #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
1927 #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
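/* The first MYRI10GE_NET_STATS_LEN (21) strings in the main table mirror
* the leading fields of struct rtnl_link_stats64 that are copied out in
* myri10ge_get_ethtool_stats() below. */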
1930 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
1932 struct myri10ge_priv *mgp = netdev_priv(netdev);
1935 switch (stringset) {
1937 memcpy(data, *myri10ge_gstrings_main_stats,
1938 sizeof(myri10ge_gstrings_main_stats));
1939 data += sizeof(myri10ge_gstrings_main_stats);
1940 for (i = 0; i < mgp->num_slices; i++) {
1941 memcpy(data, *myri10ge_gstrings_slice_stats,
1942 sizeof(myri10ge_gstrings_slice_stats));
1943 data += sizeof(myri10ge_gstrings_slice_stats);
1949 static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
1951 struct myri10ge_priv *mgp = netdev_priv(netdev);
1955 return MYRI10GE_MAIN_STATS_LEN +
1956 mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
1963 myri10ge_get_ethtool_stats(struct net_device *netdev,
1964 struct ethtool_stats *stats, u64 * data)
1966 struct myri10ge_priv *mgp = netdev_priv(netdev);
1967 struct myri10ge_slice_state *ss;
1968 struct rtnl_link_stats64 link_stats;
1972 /* force stats update */
1973 memset(&link_stats, 0, sizeof(link_stats));
1974 (void)myri10ge_get_stats(netdev, &link_stats);
1975 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1976 data[i] = ((u64 *)&link_stats)[i];
1978 data[i++] = (unsigned int)mgp->tx_boundary;
1979 data[i++] = (unsigned int)mgp->pdev->irq;
1980 data[i++] = (unsigned int)mgp->msi_enabled;
1981 data[i++] = (unsigned int)mgp->msix_enabled;
1982 data[i++] = (unsigned int)mgp->read_dma;
1983 data[i++] = (unsigned int)mgp->write_dma;
1984 data[i++] = (unsigned int)mgp->read_write_dma;
1985 data[i++] = (unsigned int)mgp->serial_number;
1986 data[i++] = (unsigned int)mgp->watchdog_resets;
1987 #ifdef CONFIG_MYRI10GE_DCA
1988 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1989 data[i++] = (unsigned int)(mgp->dca_enabled);
1991 data[i++] = (unsigned int)mgp->link_changes;
1993 /* firmware stats are useful only in the first slice */
1995 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1996 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1998 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
1999 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
2000 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
2001 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
2002 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
2004 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
2005 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
2006 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
2007 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
2008 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
2010 for (slice = 0; slice < mgp->num_slices; slice++) {
2011 ss = &mgp->ss[slice];
2013 data[i++] = (unsigned int)ss->tx.pkt_start;
2014 data[i++] = (unsigned int)ss->tx.pkt_done;
2015 data[i++] = (unsigned int)ss->tx.req;
2016 data[i++] = (unsigned int)ss->tx.done;
2017 data[i++] = (unsigned int)ss->rx_small.cnt;
2018 data[i++] = (unsigned int)ss->rx_big.cnt;
2019 data[i++] = (unsigned int)ss->tx.wake_queue;
2020 data[i++] = (unsigned int)ss->tx.stop_queue;
2021 data[i++] = (unsigned int)ss->tx.linearized;
2022 #ifdef CONFIG_NET_RX_BUSY_POLL
2023 data[i++] = ss->lock_napi_yield;
2024 data[i++] = ss->lock_poll_yield;
2025 data[i++] = ss->busy_poll_miss;
2026 data[i++] = ss->busy_poll_cnt;
2031 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
2033 struct myri10ge_priv *mgp = netdev_priv(netdev);
2034 mgp->msg_enable = value;
2037 static u32 myri10ge_get_msglevel(struct net_device *netdev)
2039 struct myri10ge_priv *mgp = netdev_priv(netdev);
2040 return mgp->msg_enable;
2044 * Use a low-level command to change the LED behavior. Rather than
2045 * blinking (which is the normal case), when identify is used, the
2046 * yellow LED turns solid.
2048 static int myri10ge_led(struct myri10ge_priv *mgp, int on)
2050 struct mcp_gen_header *hdr;
2051 struct device *dev = &mgp->pdev->dev;
2052 size_t hdr_off, pattern_off, hdr_len;
2053 u32 pattern = 0xfffffffe;
2055 /* find running firmware header */
2056 hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
2057 if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
2058 dev_err(dev, "Running firmware has bad header offset (%d)\n",
2062 hdr_len = swab32(readl(mgp->sram + hdr_off +
2063 offsetof(struct mcp_gen_header, header_length)));
2064 pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
2065 if (pattern_off >= (hdr_len + hdr_off)) {
2066 dev_info(dev, "Firmware does not support LED identification\n");
2070 pattern = swab32(readl(mgp->sram + pattern_off + 4));
2071 writel(swab32(pattern), mgp->sram + pattern_off);
2076 myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
2078 struct myri10ge_priv *mgp = netdev_priv(netdev);
2082 case ETHTOOL_ID_ACTIVE:
2083 rc = myri10ge_led(mgp, 1);
2086 case ETHTOOL_ID_INACTIVE:
2087 rc = myri10ge_led(mgp, 0);
2097 static const struct ethtool_ops myri10ge_ethtool_ops = {
2098 .get_settings = myri10ge_get_settings,
2099 .get_drvinfo = myri10ge_get_drvinfo,
2100 .get_coalesce = myri10ge_get_coalesce,
2101 .set_coalesce = myri10ge_set_coalesce,
2102 .get_pauseparam = myri10ge_get_pauseparam,
2103 .set_pauseparam = myri10ge_set_pauseparam,
2104 .get_ringparam = myri10ge_get_ringparam,
2105 .get_link = ethtool_op_get_link,
2106 .get_strings = myri10ge_get_strings,
2107 .get_sset_count = myri10ge_get_sset_count,
2108 .get_ethtool_stats = myri10ge_get_ethtool_stats,
2109 .set_msglevel = myri10ge_set_msglevel,
2110 .get_msglevel = myri10ge_get_msglevel,
2111 .set_phys_id = myri10ge_phys_id,
2114 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
2116 struct myri10ge_priv *mgp = ss->mgp;
2117 struct myri10ge_cmd cmd;
2118 struct net_device *dev = mgp->dev;
2119 int tx_ring_size, rx_ring_size;
2120 int tx_ring_entries, rx_ring_entries;
2121 int i, slice, status;
2124 /* get ring sizes */
2125 slice = ss - mgp->ss;
2127 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
2128 tx_ring_size = cmd.data0;
2130 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
2133 rx_ring_size = cmd.data0;
2135 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
2136 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
2137 ss->tx.mask = tx_ring_entries - 1;
2138 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
2142 /* allocate the host shadow rings */
2144 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
2145 * sizeof(*ss->tx.req_list);
2146 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
2147 if (ss->tx.req_bytes == NULL)
2148 goto abort_with_nothing;
2150 /* ensure req_list entries are aligned to 8 bytes */
2151 ss->tx.req_list = (struct mcp_kreq_ether_send *)
2152 ALIGN((unsigned long)ss->tx.req_bytes, 8);
2153 ss->tx.queue_active = 0;
2155 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
2156 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
2157 if (ss->rx_small.shadow == NULL)
2158 goto abort_with_tx_req_bytes;
2160 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
2161 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
2162 if (ss->rx_big.shadow == NULL)
2163 goto abort_with_rx_small_shadow;
2165 /* allocate the host info rings */
2167 bytes = tx_ring_entries * sizeof(*ss->tx.info);
2168 ss->tx.info = kzalloc(bytes, GFP_KERNEL);
2169 if (ss->tx.info == NULL)
2170 goto abort_with_rx_big_shadow;
2172 bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
2173 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
2174 if (ss->rx_small.info == NULL)
2175 goto abort_with_tx_info;
2177 bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
2178 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
2179 if (ss->rx_big.info == NULL)
2180 goto abort_with_rx_small_info;
2182 /* Fill the receive rings */
2184 ss->rx_small.cnt = 0;
2185 ss->rx_big.fill_cnt = 0;
2186 ss->rx_small.fill_cnt = 0;
2187 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
2188 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
2189 ss->rx_small.watchdog_needed = 0;
2190 ss->rx_big.watchdog_needed = 0;
2191 if (mgp->small_bytes == 0) {
2192 ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
2194 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
2195 mgp->small_bytes + MXGEFW_PAD, 0);
2198 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
2199 netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
2200 slice, ss->rx_small.fill_cnt);
2201 goto abort_with_rx_small_ring;
2204 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
2205 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
2206 netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
2207 slice, ss->rx_big.fill_cnt);
2208 goto abort_with_rx_big_ring;
2213 abort_with_rx_big_ring:
2214 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2215 int idx = i & ss->rx_big.mask;
2216 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2218 put_page(ss->rx_big.info[idx].page);
2221 abort_with_rx_small_ring:
2222 if (mgp->small_bytes == 0)
2223 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2224 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2225 int idx = i & ss->rx_small.mask;
2226 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2227 mgp->small_bytes + MXGEFW_PAD);
2228 put_page(ss->rx_small.info[idx].page);
2231 kfree(ss->rx_big.info);
2233 abort_with_rx_small_info:
2234 kfree(ss->rx_small.info);
2239 abort_with_rx_big_shadow:
2240 kfree(ss->rx_big.shadow);
2242 abort_with_rx_small_shadow:
2243 kfree(ss->rx_small.shadow);
2245 abort_with_tx_req_bytes:
2246 kfree(ss->tx.req_bytes);
2247 ss->tx.req_bytes = NULL;
2248 ss->tx.req_list = NULL;
2254 static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2256 struct myri10ge_priv *mgp = ss->mgp;
2257 struct sk_buff *skb;
2258 struct myri10ge_tx_buf *tx;
2261 /* If not allocated, skip it */
2262 if (ss->tx.req_list == NULL)
2265 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2266 idx = i & ss->rx_big.mask;
2267 if (i == ss->rx_big.fill_cnt - 1)
2268 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
2269 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2271 put_page(ss->rx_big.info[idx].page);
2274 if (mgp->small_bytes == 0)
2275 ss->rx_small.fill_cnt = ss->rx_small.cnt;
2276 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2277 idx = i & ss->rx_small.mask;
2278 if (i == ss->rx_small.fill_cnt - 1)
2279 ss->rx_small.info[idx].page_offset =
2280 MYRI10GE_ALLOC_SIZE;
2281 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2282 mgp->small_bytes + MXGEFW_PAD);
2283 put_page(ss->rx_small.info[idx].page);
2286 while (tx->done != tx->req) {
2287 idx = tx->done & tx->mask;
2288 skb = tx->info[idx].skb;
2291 tx->info[idx].skb = NULL;
2293 len = dma_unmap_len(&tx->info[idx], len);
2294 dma_unmap_len_set(&tx->info[idx], len, 0);
2296 ss->stats.tx_dropped++;
2297 dev_kfree_skb_any(skb);
2299 pci_unmap_single(mgp->pdev,
2300 dma_unmap_addr(&tx->info[idx],
2305 pci_unmap_page(mgp->pdev,
2306 dma_unmap_addr(&tx->info[idx],
2311 kfree(ss->rx_big.info);
2313 kfree(ss->rx_small.info);
2317 kfree(ss->rx_big.shadow);
2319 kfree(ss->rx_small.shadow);
2321 kfree(ss->tx.req_bytes);
2322 ss->tx.req_bytes = NULL;
2323 ss->tx.req_list = NULL;
2326 static int myri10ge_request_irq(struct myri10ge_priv *mgp)
2328 struct pci_dev *pdev = mgp->pdev;
2329 struct myri10ge_slice_state *ss;
2330 struct net_device *netdev = mgp->dev;
2334 mgp->msi_enabled = 0;
2335 mgp->msix_enabled = 0;
2338 if (mgp->num_slices > 1) {
2339 status = pci_enable_msix_range(pdev, mgp->msix_vectors,
2340 mgp->num_slices, mgp->num_slices);
2343 "Error %d setting up MSI-X\n", status);
2346 mgp->msix_enabled = 1;
2348 if (mgp->msix_enabled == 0) {
2349 status = pci_enable_msi(pdev);
2352 "Error %d setting up MSI; falling back to xPIC\n",
2355 mgp->msi_enabled = 1;
2359 if (mgp->msix_enabled) {
2360 for (i = 0; i < mgp->num_slices; i++) {
2362 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
2363 "%s:slice-%d", netdev->name, i);
2364 status = request_irq(mgp->msix_vectors[i].vector,
2365 myri10ge_intr, 0, ss->irq_desc,
2369 "slice %d failed to allocate IRQ\n", i);
2372 free_irq(mgp->msix_vectors[i].vector,
2376 pci_disable_msix(pdev);
2381 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
2382 mgp->dev->name, &mgp->ss[0]);
2384 dev_err(&pdev->dev, "failed to allocate IRQ\n");
2385 if (mgp->msi_enabled)
2386 pci_disable_msi(pdev);
2392 static void myri10ge_free_irq(struct myri10ge_priv *mgp)
2394 struct pci_dev *pdev = mgp->pdev;
2397 if (mgp->msix_enabled) {
2398 for (i = 0; i < mgp->num_slices; i++)
2399 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2401 free_irq(pdev->irq, &mgp->ss[0]);
2403 if (mgp->msi_enabled)
2404 pci_disable_msi(pdev);
2405 if (mgp->msix_enabled)
2406 pci_disable_msix(pdev);
2409 static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2411 struct myri10ge_cmd cmd;
2412 struct myri10ge_slice_state *ss;
2415 ss = &mgp->ss[slice];
2417 if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
2419 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
2421 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2422 (mgp->sram + cmd.data0);
2425 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
2427 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2428 (mgp->sram + cmd.data0);
2431 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
2432 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2433 (mgp->sram + cmd.data0);
2435 ss->tx.send_go = (__iomem __be32 *)
2436 (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
2437 ss->tx.send_stop = (__iomem __be32 *)
2438 (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
2443 static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
2445 struct myri10ge_cmd cmd;
2446 struct myri10ge_slice_state *ss;
2449 ss = &mgp->ss[slice];
2450 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2451 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2452 cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
2453 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
2454 if (status == -ENOSYS) {
2455 dma_addr_t bus = ss->fw_stats_bus;
2458 bus += offsetof(struct mcp_irq_data, send_done_count);
2459 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
2460 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
2461 status = myri10ge_send_cmd(mgp,
2462 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2464 /* Firmware cannot support multicast without STATS_DMA_V2 */
2465 mgp->fw_multicast_support = 0;
2467 mgp->fw_multicast_support = 1;
2472 static int myri10ge_open(struct net_device *dev)
2474 struct myri10ge_slice_state *ss;
2475 struct myri10ge_priv *mgp = netdev_priv(dev);
2476 struct myri10ge_cmd cmd;
2477 int i, status, big_pow2, slice;
2480 if (mgp->running != MYRI10GE_ETH_STOPPED)
2483 mgp->running = MYRI10GE_ETH_STARTING;
2484 status = myri10ge_reset(mgp);
2486 netdev_err(dev, "failed reset\n");
2487 goto abort_with_nothing;
2490 if (mgp->num_slices > 1) {
2491 cmd.data0 = mgp->num_slices;
2492 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
2493 if (mgp->dev->real_num_tx_queues > 1)
2494 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
2495 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2498 netdev_err(dev, "failed to set number of slices\n");
2499 goto abort_with_nothing;
2501 /* setup the indirection table */
2502 cmd.data0 = mgp->num_slices;
2503 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
2506 status |= myri10ge_send_cmd(mgp,
2507 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2510 netdev_err(dev, "failed to setup rss tables\n");
2511 goto abort_with_nothing;
2514 /* just enable an identity mapping */
2515 itable = mgp->sram + cmd.data0;
2516 for (i = 0; i < mgp->num_slices; i++)
2517 __raw_writeb(i, &itable[i]);
2520 cmd.data1 = myri10ge_rss_hash;
2521 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2524 netdev_err(dev, "failed to enable slices\n");
2525 goto abort_with_nothing;
2529 status = myri10ge_request_irq(mgp);
2531 goto abort_with_nothing;
2533 /* decide what small buffer size to use. For good TCP rx
2534 * performance, it is important not to receive 1514-byte
2535 * frames into jumbo buffers, as that confuses the socket buffer
2536 * accounting code, leading to drops and erratic performance.
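* Illustrative example (editorial, assuming MXGEFW_PAD == 2 and
* 64-byte cache lines): the branch below picks small_bytes =
* 128 - MXGEFW_PAD = 126, enough for Ethernet, IP and TCP headers,
* while full-sized frames land in the big buffers instead.
*/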
2539 if (dev->mtu <= ETH_DATA_LEN)
2540 /* enough for a TCP header */
2541 mgp->small_bytes = (128 > SMP_CACHE_BYTES)
2542 ? (128 - MXGEFW_PAD)
2543 : (SMP_CACHE_BYTES - MXGEFW_PAD);
2545 /* enough for a vlan encapsulated ETH_DATA_LEN frame */
2546 mgp->small_bytes = VLAN_ETH_FRAME_LEN;
2548 /* Override the small buffer size? */
2549 if (myri10ge_small_bytes >= 0)
2550 mgp->small_bytes = myri10ge_small_bytes;
2552 /* Firmware needs the big buffer size as a power of 2. Lie and
2553 * tell it the buffer is larger, because we only use 1
2554 * buffer/pkt, and the MTU will prevent overruns.
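* Illustrative example (editorial, assuming MXGEFW_PAD == 2 and a
* 4 KB MYRI10GE_ALLOC_SIZE): with the default 1500-byte MTU,
* big_pow2 starts at 1500 + 14 + 4 + 2 = 1520 and the loop below
* rounds it up to 2048, while mgp->big_bytes keeps the real
* 1520-byte length; only the size advertised to the firmware is a
* power of two.
*/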
2556 big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2557 if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
2558 while (!is_power_of_2(big_pow2))
2560 mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2562 big_pow2 = MYRI10GE_ALLOC_SIZE;
2563 mgp->big_bytes = big_pow2;
2566 /* setup the per-slice data structures */
2567 for (slice = 0; slice < mgp->num_slices; slice++) {
2568 ss = &mgp->ss[slice];
2570 status = myri10ge_get_txrx(mgp, slice);
2572 netdev_err(dev, "failed to get ring sizes or locations\n");
2573 goto abort_with_rings;
2575 status = myri10ge_allocate_rings(ss);
2577 goto abort_with_rings;
2579 /* only firmware which supports multiple TX queues
2580 * supports setting up the tx stats on non-zero slices */
2582 if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
2583 status = myri10ge_set_stats(mgp, slice);
2585 netdev_err(dev, "Couldn't set stats DMA\n");
2586 goto abort_with_rings;
2589 /* Initialize the slice spinlock and state used for polling */
2590 myri10ge_ss_init_lock(ss);
2592 /* must happen prior to any irq */
2593 napi_enable(&(ss)->napi);
2596 /* now give the firmware the buffer sizes and the MTU */
2597 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
2598 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
2599 cmd.data0 = mgp->small_bytes;
2601 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
2602 cmd.data0 = big_pow2;
2604 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
2606 netdev_err(dev, "Couldn't set buffer sizes\n");
2607 goto abort_with_rings;
2611 * Set Linux style TSO mode; this is needed only on newer
2612 * firmware versions. Older versions default to Linux style TSO. */
2616 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2617 if (status && status != -ENOSYS) {
2618 netdev_err(dev, "Couldn't set TSO mode\n");
2619 goto abort_with_rings;
2622 mgp->link_state = ~0U;
2623 mgp->rdma_tags_available = 15;
2625 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2627 netdev_err(dev, "Couldn't bring up link\n");
2628 goto abort_with_rings;
2631 mgp->running = MYRI10GE_ETH_RUNNING;
2632 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2633 add_timer(&mgp->watchdog_timer);
2634 netif_tx_wake_all_queues(dev);
2641 napi_disable(&mgp->ss[slice].napi);
2643 for (i = 0; i < mgp->num_slices; i++)
2644 myri10ge_free_rings(&mgp->ss[i]);
2646 myri10ge_free_irq(mgp);
2649 mgp->running = MYRI10GE_ETH_STOPPED;
2653 static int myri10ge_close(struct net_device *dev)
2655 struct myri10ge_priv *mgp = netdev_priv(dev);
2656 struct myri10ge_cmd cmd;
2657 int status, old_down_cnt;
2660 if (mgp->running != MYRI10GE_ETH_RUNNING)
2663 if (mgp->ss[0].tx.req_bytes == NULL)
2666 del_timer_sync(&mgp->watchdog_timer);
2667 mgp->running = MYRI10GE_ETH_STOPPING;
2668 for (i = 0; i < mgp->num_slices; i++) {
2669 napi_disable(&mgp->ss[i].napi);
2670 local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
2671 /* Lock the slice to prevent the busy_poll handler from
2672 * accessing it. Later when we bring the NIC up, myri10ge_open
2673 * resets the slice including this lock.
2675 while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
2676 pr_info("Slice %d locked\n", i);
2681 netif_carrier_off(dev);
2683 netif_tx_stop_all_queues(dev);
2684 if (mgp->rebooted == 0) {
2685 old_down_cnt = mgp->down_cnt;
2688 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
2690 netdev_err(dev, "Couldn't bring down link\n");
2692 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
2694 if (old_down_cnt == mgp->down_cnt)
2695 netdev_err(dev, "never got down irq\n");
2697 netif_tx_disable(dev);
2698 myri10ge_free_irq(mgp);
2699 for (i = 0; i < mgp->num_slices; i++)
2700 myri10ge_free_rings(&mgp->ss[i]);
2702 mgp->running = MYRI10GE_ETH_STOPPED;
2706 /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2707 * backwards one at a time and handle ring wraps */
2710 myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
2711 struct mcp_kreq_ether_send *src, int cnt)
2713 int idx, starting_slot;
2714 starting_slot = tx->req;
2717 idx = (starting_slot + cnt) & tx->mask;
2718 myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
2724 * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2725 * at most 32 bytes at a time, so as to avoid involving the software
2726 * pio handler in the nic. We re-write the first segment's flags
2727 * to mark them valid only after writing the entire chain.
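* Editorial note: each send descriptor is 16 bytes, so the copy loop
* below moves two descriptors per 32-byte PIO write, with an mb()
* after each chunk; the final put_be32() of the saved flags is
* therefore the last store the NIC sees, so it cannot start on a
* partially written chain.
*/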
2731 myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
2735 struct mcp_kreq_ether_send __iomem *dstp, *dst;
2736 struct mcp_kreq_ether_send *srcp;
2739 idx = tx->req & tx->mask;
2741 last_flags = src->flags;
2744 dst = dstp = &tx->lanai[idx];
2747 if ((idx + cnt) < tx->mask) {
2748 for (i = 0; i < (cnt - 1); i += 2) {
2749 myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
2750 mb(); /* force write every 32 bytes */
2755 /* submit all but the first request, and ensure
2756 * that it is submitted below */
2757 myri10ge_submit_req_backwards(tx, src, cnt);
2761 /* submit the first request */
2762 myri10ge_pio_copy(dstp, srcp, sizeof(*src));
2763 mb(); /* barrier before setting valid flag */
2766 /* re-write the last 32 bits with the valid flags */
2767 src->flags = last_flags;
2768 put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
2773 static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
2774 struct myri10ge_tx_buf *tx, int idx)
2779 /* Free any DMA resources we've allocated and clear out the skb slot */
2780 last_idx = (idx + 1) & tx->mask;
2781 idx = tx->req & tx->mask;
2783 len = dma_unmap_len(&tx->info[idx], len);
2785 if (tx->info[idx].skb != NULL)
2786 pci_unmap_single(mgp->pdev,
2787 dma_unmap_addr(&tx->info[idx],
2791 pci_unmap_page(mgp->pdev,
2792 dma_unmap_addr(&tx->info[idx],
2795 dma_unmap_len_set(&tx->info[idx], len, 0);
2796 tx->info[idx].skb = NULL;
2798 idx = (idx + 1) & tx->mask;
2799 } while (idx != last_idx);
2803 * Transmit a packet. We need to split the packet so that a single
2804 * segment does not cross myri10ge->tx_boundary, so this makes segment
2805 * counting tricky. So rather than try to count segments up front, we
2806 * just give up if there are not enough free send descriptors
2807 * available to hold a reasonably fragmented packet. If we run
2808 * out of segments while preparing a packet for DMA, we just linearize it and try again.
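* Illustrative example (editorial): with tx_boundary == 4096, a linear
* buffer whose DMA address ends in 0xf00 is emitted as a first
* 0x100-byte segment (up to the next 4 KB boundary) followed by full
* 4096-byte segments, since seglen = boundary - low in the loop below.
*/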
2812 static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
2813 struct net_device *dev)
2815 struct myri10ge_priv *mgp = netdev_priv(dev);
2816 struct myri10ge_slice_state *ss;
2817 struct mcp_kreq_ether_send *req;
2818 struct myri10ge_tx_buf *tx;
2819 struct skb_frag_struct *frag;
2820 struct netdev_queue *netdev_queue;
2823 __be32 high_swapped;
2825 int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
2826 u16 pseudo_hdr_offset, cksum_offset, queue;
2827 int cum_len, seglen, boundary, rdma_count;
2830 queue = skb_get_queue_mapping(skb);
2831 ss = &mgp->ss[queue];
2832 netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
2837 avail = tx->mask - 1 - (tx->req - tx->done);
2840 max_segments = MXGEFW_MAX_SEND_DESC;
2842 if (skb_is_gso(skb)) {
2843 mss = skb_shinfo(skb)->gso_size;
2844 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
2847 if ((unlikely(avail < max_segments))) {
2848 /* we are out of transmit resources */
2850 netif_tx_stop_queue(netdev_queue);
2851 return NETDEV_TX_BUSY;
2854 /* Setup checksum offloading, if needed */
2856 pseudo_hdr_offset = 0;
2858 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
2859 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2860 cksum_offset = skb_checksum_start_offset(skb);
2861 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2862 /* If the headers are excessively large, then we must
2863 * fall back to a software checksum */
2864 if (unlikely(!mss && (cksum_offset > 255 ||
2865 pseudo_hdr_offset > 127))) {
2866 if (skb_checksum_help(skb))
2869 pseudo_hdr_offset = 0;
2871 odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
2872 flags |= MXGEFW_FLAGS_CKSUM;
2878 if (mss) { /* TSO */
2879 /* this removes any CKSUM flag from before */
2880 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
2882 /* negative cum_len signifies to the
2883 * send loop that we are still in the
2884 * header portion of the TSO packet.
2885 * The TSO header can be at most 1KB long. */
2886 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
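/* Illustrative example (editorial): for a plain IPv4/TCP frame with no
 * IP or TCP options, skb_transport_offset() is 14 + 20 = 34 and
 * tcp_hdrlen() is 20, so cum_len starts at -54 and only becomes
 * non-negative once the loop below has consumed the 54 header bytes
 * and reached payload.
 */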
2888 /* for IPv6 TSO, the checksum offset stores the
2889 * TCP header length, to save the firmware from
2890 * the need to parse the headers */
2891 if (skb_is_gso_v6(skb)) {
2892 cksum_offset = tcp_hdrlen(skb);
2893 /* Can only handle headers <= max_tso6 long */
2894 if (unlikely(-cum_len > mgp->max_tso6))
2895 return myri10ge_sw_tso(skb, dev);
2897 /* for TSO, pseudo_hdr_offset holds mss.
2898 * The firmware figures out where to put
2899 * the checksum by parsing the header. */
2900 pseudo_hdr_offset = mss;
2902 /* Mark small packets, and pad out tiny packets */
2903 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2904 flags |= MXGEFW_FLAGS_SMALL;
2906 /* pad frames to at least ETH_ZLEN bytes */
2907 if (eth_skb_pad(skb)) {
2908 /* The packet is gone, so we must return NETDEV_TX_OK */
2910 ss->stats.tx_dropped += 1;
2911 return NETDEV_TX_OK;
2915 /* map the skb for DMA */
2916 len = skb_headlen(skb);
2917 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2918 if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
2921 idx = tx->req & tx->mask;
2922 tx->info[idx].skb = skb;
2923 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2924 dma_unmap_len_set(&tx->info[idx], len, len);
2926 frag_cnt = skb_shinfo(skb)->nr_frags;
2931 /* "rdma_count" is the number of RDMAs belonging to the
2932 * current packet BEFORE the current send request. For
2933 * non-TSO packets, this is equal to "count".
2934 * For TSO packets, rdma_count needs to be reset
2935 * to 0 after a segment cut.
2937 * The rdma_count field of the send request is
2938 * the number of RDMAs of the packet starting at
2939 * that request. For TSO send requests with one or more cuts
2940 * in the middle, this is the number of RDMAs starting
2941 * after the last cut in the request. All previous
2942 * segments before the last cut implicitly have 1 RDMA.
2944 * Since the number of RDMAs is not known beforehand,
2945 * it must be filled-in retroactively - after each
2946 * segmentation cut or at the end of the entire packet.
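* Editorial note: the "(req - rdma_count)->rdma_count = ..." stores
* below are that retroactive fill: they back-patch the descriptor that
* began the current group once its length is finally known, at each
* TSO segmentation cut and once more at the end of the packet.
*/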
2950 /* Break the SKB or Fragment up into pieces which
2951 * do not cross mgp->tx_boundary */
2952 low = MYRI10GE_LOWPART_TO_U32(bus);
2953 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
2958 if (unlikely(count == max_segments))
2959 goto abort_linearize;
2962 (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
2963 seglen = boundary - low;
2966 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2967 cum_len_next = cum_len + seglen;
2968 if (mss) { /* TSO */
2969 (req - rdma_count)->rdma_count = rdma_count + 1;
2971 if (likely(cum_len >= 0)) { /* payload */
2972 int next_is_first, chop;
2974 chop = (cum_len_next > mss);
2975 cum_len_next = cum_len_next % mss;
2976 next_is_first = (cum_len_next == 0);
2977 flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
2978 flags_next |= next_is_first *
2980 rdma_count |= -(chop | next_is_first);
2981 rdma_count += chop & ~next_is_first;
2982 } else if (likely(cum_len_next >= 0)) { /* header ends */
2988 small = (mss <= MXGEFW_SEND_SMALL_SIZE);
2989 flags_next = MXGEFW_FLAGS_TSO_PLD |
2990 MXGEFW_FLAGS_FIRST |
2991 (small * MXGEFW_FLAGS_SMALL);
2994 req->addr_high = high_swapped;
2995 req->addr_low = htonl(low);
2996 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
2997 req->pad = 0; /* complete solid 16-byte block; does this matter? */
2998 req->rdma_count = 1;
2999 req->length = htons(seglen);
3000 req->cksum_offset = cksum_offset;
3001 req->flags = flags | ((cum_len & 1) * odd_flag);
3005 cum_len = cum_len_next;
3010 if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
3011 if (unlikely(cksum_offset > seglen))
3012 cksum_offset -= seglen;
3017 if (frag_idx == frag_cnt)
3020 /* map next fragment for DMA */
3021 frag = &skb_shinfo(skb)->frags[frag_idx];
3023 len = skb_frag_size(frag);
3024 bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
3026 if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
3027 myri10ge_unmap_tx_dma(mgp, tx, idx);
3030 idx = (count + tx->req) & tx->mask;
3031 dma_unmap_addr_set(&tx->info[idx], bus, bus);
3032 dma_unmap_len_set(&tx->info[idx], len, len);
3035 (req - rdma_count)->rdma_count = rdma_count;
3039 req->flags |= MXGEFW_FLAGS_TSO_LAST;
3040 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
3041 MXGEFW_FLAGS_FIRST)));
3042 idx = ((count - 1) + tx->req) & tx->mask;
3043 tx->info[idx].last = 1;
3044 myri10ge_submit_req(tx, tx->req_list, count);
3045 /* if using multiple tx queues, make sure NIC polls the
3047 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
3048 tx->queue_active = 1;
3049 put_be32(htonl(1), tx->send_go);
3054 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
3056 netif_tx_stop_queue(netdev_queue);
3058 return NETDEV_TX_OK;
3061 myri10ge_unmap_tx_dma(mgp, tx, idx);
3063 if (skb_is_gso(skb)) {
3064 netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
3068 if (skb_linearize(skb))
3075 dev_kfree_skb_any(skb);
3076 ss->stats.tx_dropped += 1;
3077 return NETDEV_TX_OK;
3081 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
3082 struct net_device *dev)
3084 struct sk_buff *segs, *curr;
3085 struct myri10ge_priv *mgp = netdev_priv(dev);
3086 struct myri10ge_slice_state *ss;
3089 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
3097 status = myri10ge_xmit(curr, dev);
3099 dev_kfree_skb_any(curr);
3104 dev_kfree_skb_any(segs);
3109 dev_kfree_skb_any(skb);
3110 return NETDEV_TX_OK;
3113 ss = &mgp->ss[skb_get_queue_mapping(skb)];
3114 dev_kfree_skb_any(skb);
3115 ss->stats.tx_dropped += 1;
3116 return NETDEV_TX_OK;
3119 static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
3120 struct rtnl_link_stats64 *stats)
3122 const struct myri10ge_priv *mgp = netdev_priv(dev);
3123 const struct myri10ge_slice_netstats *slice_stats;
3126 for (i = 0; i < mgp->num_slices; i++) {
3127 slice_stats = &mgp->ss[i].stats;
3128 stats->rx_packets += slice_stats->rx_packets;
3129 stats->tx_packets += slice_stats->tx_packets;
3130 stats->rx_bytes += slice_stats->rx_bytes;
3131 stats->tx_bytes += slice_stats->tx_bytes;
3132 stats->rx_dropped += slice_stats->rx_dropped;
3133 stats->tx_dropped += slice_stats->tx_dropped;
3138 static void myri10ge_set_multicast_list(struct net_device *dev)
3140 struct myri10ge_priv *mgp = netdev_priv(dev);
3141 struct myri10ge_cmd cmd;
3142 struct netdev_hw_addr *ha;
3143 __be32 data[2] = { 0, 0 };
3146 /* can be called from atomic contexts,
3147 * pass 1 to force atomicity in myri10ge_send_cmd() */
3148 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
3150 /* This firmware is known not to support multicast */
3151 if (!mgp->fw_multicast_support)
3154 /* Disable multicast filtering */
3156 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
3158 netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
3163 if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
3164 /* request to disable multicast filtering, so quit here */
3168 /* Flush the filters */
3170 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
3173 netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
3178 /* Walk the multicast list, and add each address */
3179 netdev_for_each_mc_addr(ha, dev) {
3180 memcpy(data, &ha->addr, ETH_ALEN);
3181 cmd.data0 = ntohl(data[0]);
3182 cmd.data1 = ntohl(data[1]);
3183 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
3187 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3192 /* Enable multicast filtering */
3193 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
3195 netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
3206 static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3208 struct sockaddr *sa = addr;
3209 struct myri10ge_priv *mgp = netdev_priv(dev);
3212 if (!is_valid_ether_addr(sa->sa_data))
3213 return -EADDRNOTAVAIL;
3215 status = myri10ge_update_mac_address(mgp, sa->sa_data);
3217 netdev_err(dev, "changing mac address failed with %d\n",
3222 /* change the dev structure */
3223 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
3227 static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3229 struct myri10ge_priv *mgp = netdev_priv(dev);
3232 if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
3233 netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
3236 netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
3238 /* if we change the mtu on an active device, we must
3239 * reset the device so the firmware sees the change */
3240 myri10ge_close(dev);
3250 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
3251 * Only do it if the bridge is a root port since we don't want to disturb
3252 * any other device, except if forced with myri10ge_ecrc_enable > 1.
3255 static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
3257 struct pci_dev *bridge = mgp->pdev->bus->self;
3258 struct device *dev = &mgp->pdev->dev;
3263 if (!myri10ge_ecrc_enable || !bridge)
3266 /* check that the bridge is a root port */
3267 if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
3268 if (myri10ge_ecrc_enable > 1) {
3269 struct pci_dev *prev_bridge, *old_bridge = bridge;
3271 /* Walk the hierarchy up to the root port
3272 * where ECRC has to be enabled */
3274 prev_bridge = bridge;
3275 bridge = bridge->bus->self;
3276 if (!bridge || prev_bridge == bridge) {
3278 "Failed to find root port"
3279 " to force ECRC\n");
3282 } while (pci_pcie_type(bridge) !=
3283 PCI_EXP_TYPE_ROOT_PORT);
3286 "Forcing ECRC on non-root port %s"
3287 " (enabling on root port %s)\n",
3288 pci_name(old_bridge), pci_name(bridge));
3291 "Not enabling ECRC on non-root port %s\n",
3297 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
3301 ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
3303 dev_err(dev, "failed reading ext-conf-space of %s\n",
3305 dev_err(dev, "\t pci=nommconf in use? "
3306 "or buggy/incomplete/absent ACPI MCFG attr?\n");
3309 if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
3312 err_cap |= PCI_ERR_CAP_ECRC_GENE;
3313 pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
3314 dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
3318 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
3319 * when the PCI-E Completion packets are aligned on an 8-byte
3320 * boundary. Some PCI-E chip sets always align Completion packets; on
3321 * the ones that do not, the alignment can be enforced by enabling
3322 * ECRC generation (if supported).
3324 * When PCI-E Completion packets are not aligned, it is actually more
3325 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
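* Editorial summary (not in the original comment): the probe below
* first loads the "aligned" firmware with a 4096-byte tx_boundary,
* runs a DMA test that aborts on the first unaligned completion seen,
* and on failure falls back to a 2048-byte tx_boundary and the
* "unaligned" firmware.
*/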
3329 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
3331 struct pci_dev *pdev = mgp->pdev;
3332 struct device *dev = &pdev->dev;
3335 mgp->tx_boundary = 4096;
3337 * Verify the max read request size was set to 4KB
3338 * before trying the test with 4KB.
3340 status = pcie_get_readrq(pdev);
3342 dev_err(dev, "Couldn't read max read req size: %d\n", status);
3345 if (status != 4096) {
3346 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
3347 mgp->tx_boundary = 2048;
3350 * load the optimized firmware (which assumes aligned PCIe
3351 * completions) in order to see if it works on this host.
3353 set_fw_name(mgp, myri10ge_fw_aligned, false);
3354 status = myri10ge_load_firmware(mgp, 1);
3360 * Enable ECRC if possible
3362 myri10ge_enable_ecrc(mgp);
3365 * Run a DMA test which watches for unaligned completions and
3366 * aborts on the first one seen.
3369 status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
3371 return; /* keep the aligned firmware */
3373 if (status != -E2BIG)
3374 dev_warn(dev, "DMA test failed: %d\n", status);
3375 if (status == -ENOSYS)
3376 dev_warn(dev, "Falling back to ethp! "
3377 "Please install up to date fw\n");
3379 /* fall back to using the unaligned firmware */
3380 mgp->tx_boundary = 2048;
3381 set_fw_name(mgp, myri10ge_fw_unaligned, false);
3384 static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3388 if (myri10ge_force_firmware == 0) {
3392 pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
3393 link_width = (lnk >> 4) & 0x3f;
3395 /* Check to see if Link is less than 8 or if the
3396 * upstream bridge is known to provide aligned completions */
3398 if (link_width < 8) {
3399 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
3401 mgp->tx_boundary = 4096;
3402 set_fw_name(mgp, myri10ge_fw_aligned, false);
3404 myri10ge_firmware_probe(mgp);
3407 if (myri10ge_force_firmware == 1) {
3408 dev_info(&mgp->pdev->dev,
3409 "Assuming aligned completions (forced)\n");
3410 mgp->tx_boundary = 4096;
3411 set_fw_name(mgp, myri10ge_fw_aligned, false);
3413 dev_info(&mgp->pdev->dev,
3414 "Assuming unaligned completions (forced)\n");
3415 mgp->tx_boundary = 2048;
3416 set_fw_name(mgp, myri10ge_fw_unaligned, false);
3420 kernel_param_lock(THIS_MODULE);
3421 if (myri10ge_fw_name != NULL) {
3422 char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
3425 set_fw_name(mgp, fw_name, true);
3428 kernel_param_unlock(THIS_MODULE);
3430 if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
3431 myri10ge_fw_names[mgp->board_number] != NULL &&
3432 strlen(myri10ge_fw_names[mgp->board_number])) {
3433 set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
3437 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
3441 static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
3443 struct pci_dev *bridge = pdev->bus->self;
3450 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
3452 /* a sram parity error can cause a surprise link
3453 * down; since we expect and can recover from sram
3454 * parity errors, mask surprise link down events */
3455 pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
3457 pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
3462 static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
3464 struct myri10ge_priv *mgp;
3465 struct net_device *netdev;
3467 mgp = pci_get_drvdata(pdev);
3472 netif_device_detach(netdev);
3473 if (netif_running(netdev)) {
3474 netdev_info(netdev, "closing\n");
3476 myri10ge_close(netdev);
3479 myri10ge_dummy_rdma(mgp, 0);
3480 pci_save_state(pdev);
3481 pci_disable_device(pdev);
3483 return pci_set_power_state(pdev, pci_choose_state(pdev, state));
3486 static int myri10ge_resume(struct pci_dev *pdev)
3488 struct myri10ge_priv *mgp;
3489 struct net_device *netdev;
3493 mgp = pci_get_drvdata(pdev);
3497 pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */
3498 msleep(5); /* give card time to respond */
3499 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3500 if (vendor == 0xffff) {
3501 netdev_err(mgp->dev, "device disappeared!\n");
3505 pci_restore_state(pdev);
3507 status = pci_enable_device(pdev);
3509 dev_err(&pdev->dev, "failed to enable device\n");
3513 pci_set_master(pdev);
3515 myri10ge_reset(mgp);
3516 myri10ge_dummy_rdma(mgp, 1);
3518 /* Save configuration space to be restored if the
3519 * nic resets due to a parity error */
3520 pci_save_state(pdev);
3522 if (netif_running(netdev)) {
3524 status = myri10ge_open(netdev);
3527 goto abort_with_enabled;
3530 netif_device_attach(netdev);
3535 pci_disable_device(pdev);
3539 #endif /* CONFIG_PM */
3541 static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
3543 struct pci_dev *pdev = mgp->pdev;
3544 int vs = mgp->vendor_specific_offset;
3547 /* enter read32 mode */
3548 pci_write_config_byte(pdev, vs + 0x10, 0x3);
3550 /* read REBOOT_STATUS (0xfffffff0) */
3551 pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
3552 pci_read_config_dword(pdev, vs + 0x14, &reboot);
3557 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
3558 int *busy_slice_cnt, u32 rx_pause_cnt)
3560 struct myri10ge_priv *mgp = ss->mgp;
3561 int slice = ss - mgp->ss;
3563 if (ss->tx.req != ss->tx.done &&
3564 ss->tx.done == ss->watchdog_tx_done &&
3565 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3566 /* nic seems like it might be stuck.. */
3567 if (rx_pause_cnt != mgp->watchdog_pause) {
3568 if (net_ratelimit())
3569 netdev_warn(mgp->dev, "slice %d: TX paused, "
3570 "check link partner\n", slice);
3572 netdev_warn(mgp->dev,
3573 "slice %d: TX stuck %d %d %d %d %d %d\n",
3574 slice, ss->tx.queue_active, ss->tx.req,
3575 ss->tx.done, ss->tx.pkt_start,
3577 (int)ntohl(mgp->ss[slice].fw_stats->
3583 if (ss->watchdog_tx_done != ss->tx.done ||
3584 ss->watchdog_rx_done != ss->rx_done.cnt) {
3585 *busy_slice_cnt += 1;
3587 ss->watchdog_tx_done = ss->tx.done;
3588 ss->watchdog_tx_req = ss->tx.req;
3589 ss->watchdog_rx_done = ss->rx_done.cnt;
3593 * This watchdog is used to check whether the board has suffered
3594 * from a parity error and needs to be recovered.
3596 static void myri10ge_watchdog(struct work_struct *work)
3598 struct myri10ge_priv *mgp =
3599 container_of(work, struct myri10ge_priv, watchdog_work);
3600 struct myri10ge_slice_state *ss;
3601 u32 reboot, rx_pause_cnt;
3602 int status, rebooted;
3604 int reset_needed = 0;
3605 int busy_slice_cnt = 0;
3608 mgp->watchdog_resets++;
3609 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3611 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3612 /* Bus master DMA disabled? Check to see
3613 * if the card rebooted due to a parity error
3614 * For now, just report it */
3615 reboot = myri10ge_read_reboot(mgp);
3616 netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
3617 reboot, myri10ge_reset_recover ? "" : " not");
3618 if (myri10ge_reset_recover == 0)
3623 myri10ge_close(mgp->dev);
3624 myri10ge_reset_recover--;
3627 * A rebooted nic will come back with config space as
3628 * it was after power was applied to PCIe bus.
3629 * Attempt to restore config space which was saved
3630 * when the driver was loaded, or the last time the
3631 * nic was resumed from power saving mode.
3633 pci_restore_state(mgp->pdev);
3635 /* save state again for accounting reasons */
3636 pci_save_state(mgp->pdev);
3639 /* if we get back -1's from our slot, perhaps somebody
3640 * powered off our card. Don't try to reset it in that case. */
3642 if (cmd == 0xffff) {
3643 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3644 if (vendor == 0xffff) {
3645 netdev_err(mgp->dev, "device disappeared!\n");
3649 /* Perhaps it is a software error. See if stuck slice
3650 * has recovered, reset if not */
3651 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3652 for (i = 0; i < mgp->num_slices; i++) {
3655 myri10ge_check_slice(ss, &reset_needed,
3661 if (!reset_needed) {
3662 netdev_dbg(mgp->dev, "not resetting\n");
3666 netdev_err(mgp->dev, "device timeout, resetting\n");
3671 myri10ge_close(mgp->dev);
3673 status = myri10ge_load_firmware(mgp, 1);
3675 netdev_err(mgp->dev, "failed to load firmware\n");
3677 myri10ge_open(mgp->dev);
3682 * We use our own timer routine rather than relying upon
3683 * netdev->tx_timeout because we have a very large hardware transmit
3684 * queue. Due to the large queue, the netdev->tx_timeout function
3685 * cannot detect a NIC with a parity error in a timely fashion if the
3686 * NIC is lightly loaded.
3688 static void myri10ge_watchdog_timer(unsigned long arg)
3690 struct myri10ge_priv *mgp;
3691 struct myri10ge_slice_state *ss;
3692 int i, reset_needed, busy_slice_cnt;
3696 mgp = (struct myri10ge_priv *)arg;
3698 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3700 for (i = 0, reset_needed = 0;
3701 i < mgp->num_slices && reset_needed == 0; ++i) {
3704 if (ss->rx_small.watchdog_needed) {
3705 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3706 mgp->small_bytes + MXGEFW_PAD,
3708 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3709 myri10ge_fill_thresh)
3710 ss->rx_small.watchdog_needed = 0;
3712 if (ss->rx_big.watchdog_needed) {
3713 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3715 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3716 myri10ge_fill_thresh)
3717 ss->rx_big.watchdog_needed = 0;
3719 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
3722 /* if we've sent or received no traffic, poll the NIC to
3723 * ensure it is still there. Otherwise, we risk not noticing
3724 * an error in a timely fashion */
3725 if (busy_slice_cnt == 0) {
3726 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3727 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3731 mgp->watchdog_pause = rx_pause_cnt;
3734 schedule_work(&mgp->watchdog_work);
3737 mod_timer(&mgp->watchdog_timer,
3738 jiffies + myri10ge_watchdog_timeout * HZ);
3742 static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3744 struct myri10ge_slice_state *ss;
3745 struct pci_dev *pdev = mgp->pdev;
3749 if (mgp->ss == NULL)
3752 for (i = 0; i < mgp->num_slices; i++) {
3754 if (ss->rx_done.entry != NULL) {
3755 bytes = mgp->max_intr_slots *
3756 sizeof(*ss->rx_done.entry);
3757 dma_free_coherent(&pdev->dev, bytes,
3758 ss->rx_done.entry, ss->rx_done.bus);
3759 ss->rx_done.entry = NULL;
3761 if (ss->fw_stats != NULL) {
3762 bytes = sizeof(*ss->fw_stats);
3763 dma_free_coherent(&pdev->dev, bytes,
3764 ss->fw_stats, ss->fw_stats_bus);
3765 ss->fw_stats = NULL;
3767 napi_hash_del(&ss->napi);
3768 netif_napi_del(&ss->napi);
3770 /* Wait till napi structs are no longer used, and then free ss. */
3776 static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3778 struct myri10ge_slice_state *ss;
3779 struct pci_dev *pdev = mgp->pdev;
3783 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3784 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3785 if (mgp->ss == NULL) {
3789 for (i = 0; i < mgp->num_slices; i++) {
3791 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3792 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
3795 if (ss->rx_done.entry == NULL)
3797 bytes = sizeof(*ss->fw_stats);
3798 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3801 if (ss->fw_stats == NULL)
3805 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3806 myri10ge_napi_weight);
3810 myri10ge_free_slices(mgp);
3815 * This function determines the number of slices supported.
3816 * The number of slices is the minimum of the number of CPUs,
3817 * the number of MSI-X IRQs supported, and the number of slices
3818 * supported by the firmware.
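* Roughly (editorial sketch, not part of the original comment):
*   num_slices = min(netif_get_num_default_rss_queues(),
*                    MXGEFW_CMD_GET_MAX_RSS_QUEUES result,
*                    myri10ge_max_slices);
* and it is reduced further (rounded down to a power of two) if fewer
* MSI-X vectors can actually be allocated, as the
* pci_enable_msix_range() loop below shows.
*/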
3820 static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3822 struct myri10ge_cmd cmd;
3823 struct pci_dev *pdev = mgp->pdev;
3826 int i, status, ncpus;
3828 mgp->num_slices = 1;
3829 ncpus = netif_get_num_default_rss_queues();
3831 if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
3832 (myri10ge_max_slices == -1 && ncpus < 2))
3835 /* try to load the slice-aware RSS firmware */
3836 old_fw = mgp->fw_name;
3837 old_allocated = mgp->fw_name_allocated;
3838 /* don't free old_fw if we override it. */
3839 mgp->fw_name_allocated = false;
3841 if (myri10ge_fw_name != NULL) {
3842 dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
3844 set_fw_name(mgp, myri10ge_fw_name, false);
3845 } else if (old_fw == myri10ge_fw_aligned)
3846 set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
3848 set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
3849 status = myri10ge_load_firmware(mgp, 0);
3851 dev_info(&pdev->dev, "Rss firmware not found\n");
3857 /* hit the board with a reset to ensure it is alive */
3858 memset(&cmd, 0, sizeof(cmd));
3859 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
3861 dev_err(&mgp->pdev->dev, "failed reset\n");
3865 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
3867 /* tell it the size of the interrupt queues */
3868 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
3869 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
3871 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
3875 /* ask the maximum number of slices it supports */
3876 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
3880 mgp->num_slices = cmd.data0;
3882 /* Only allow multiple slices if MSI-X is usable */
3883 if (!myri10ge_msi) {
3887 /* if the admin did not specify a limit to how many
3888 * slices we should use, cap it automatically to the
3889 * number of CPUs currently online */
3890 if (myri10ge_max_slices == -1)
3891 myri10ge_max_slices = ncpus;
3893 if (mgp->num_slices > myri10ge_max_slices)
3894 mgp->num_slices = myri10ge_max_slices;
3896 /* Now try to allocate as many MSI-X vectors as we have
3897 * slices. We give up on MSI-X if we can only get a single vector. */
3900 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3902 if (mgp->msix_vectors == NULL)
3904 for (i = 0; i < mgp->num_slices; i++) {
3905 mgp->msix_vectors[i].entry = i;
3908 while (mgp->num_slices > 1) {
3909 mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
3910 if (mgp->num_slices == 1)
3912 status = pci_enable_msix_range(pdev,
3919 pci_disable_msix(pdev);
3921 if (status == mgp->num_slices) {
3926 mgp->num_slices = status;
3931 if (mgp->msix_vectors != NULL) {
3932 kfree(mgp->msix_vectors);
3933 mgp->msix_vectors = NULL;
3937 mgp->num_slices = 1;
3938 set_fw_name(mgp, old_fw, old_allocated);
3939 myri10ge_load_firmware(mgp, 0);
3942 static const struct net_device_ops myri10ge_netdev_ops = {
3943 .ndo_open = myri10ge_open,
3944 .ndo_stop = myri10ge_close,
3945 .ndo_start_xmit = myri10ge_xmit,
3946 .ndo_get_stats64 = myri10ge_get_stats,
3947 .ndo_validate_addr = eth_validate_addr,
3948 .ndo_change_mtu = myri10ge_change_mtu,
3949 .ndo_set_rx_mode = myri10ge_set_multicast_list,
3950 .ndo_set_mac_address = myri10ge_set_mac_address,
3951 #ifdef CONFIG_NET_RX_BUSY_POLL
3952 .ndo_busy_poll = myri10ge_busy_poll,
3956 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3958 struct net_device *netdev;
3959 struct myri10ge_priv *mgp;
3960 struct device *dev = &pdev->dev;
3962 int status = -ENXIO;
3964 unsigned hdr_offset, ss_offset;
3965 static int board_number;
3967 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3971 SET_NETDEV_DEV(netdev, &pdev->dev);
3973 mgp = netdev_priv(netdev);
3976 mgp->pause = myri10ge_flow_control;
3977 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
3978 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
3979 mgp->board_number = board_number;
3980 init_waitqueue_head(&mgp->down_wq);
3982 if (pci_enable_device(pdev)) {
3983 dev_err(&pdev->dev, "pci_enable_device call failed\n");
3985 goto abort_with_netdev;
3988 /* Find the vendor-specific cap so we can check
3989 * the reboot register later on */
3990 mgp->vendor_specific_offset
3991 = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
3993 /* Set our max read request to 4KB */
3994 status = pcie_set_readrq(pdev, 4096);
3996 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
3998 goto abort_with_enabled;
4001 myri10ge_mask_surprise_down(pdev);
4002 pci_set_master(pdev);
4004 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4008 "64-bit pci address mask was refused, "
4010 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4013 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
4014 goto abort_with_enabled;
4016 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4017 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
4018 &mgp->cmd_bus, GFP_KERNEL);
4021 goto abort_with_enabled;
4024 mgp->board_span = pci_resource_len(pdev, 0);
4025 mgp->iomem_base = pci_resource_start(pdev, 0);
4026 mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
4027 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
4028 if (mgp->sram == NULL) {
4029 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
4030 mgp->board_span, mgp->iomem_base);
4032 goto abort_with_mtrr;
4035 swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
4036 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
4037 mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
4038 if (mgp->sram_size > mgp->board_span ||
4039 mgp->sram_size <= MYRI10GE_FW_OFFSET) {
4041 "invalid sram_size %dB or board span %ldB\n",
4042 mgp->sram_size, mgp->board_span);
4044 goto abort_with_ioremap;
4046 memcpy_fromio(mgp->eeprom_strings,
4047 mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
4048 memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
4049 status = myri10ge_read_mac_addr(mgp);
4051 goto abort_with_ioremap;
4053 for (i = 0; i < ETH_ALEN; i++)
4054 netdev->dev_addr[i] = mgp->mac_addr[i];
4056 myri10ge_select_firmware(mgp);
4058 status = myri10ge_load_firmware(mgp, 1);
4060 dev_err(&pdev->dev, "failed to load firmware\n");
4061 goto abort_with_ioremap;
4063 myri10ge_probe_slices(mgp);
4064 status = myri10ge_alloc_slices(mgp);
4066 dev_err(&pdev->dev, "failed to alloc slice state\n");
4067 goto abort_with_firmware;
4069 netif_set_real_num_tx_queues(netdev, mgp->num_slices);
4070 netif_set_real_num_rx_queues(netdev, mgp->num_slices);
4071 status = myri10ge_reset(mgp);
4073 dev_err(&pdev->dev, "failed reset\n");
4074 goto abort_with_slices;
4076 #ifdef CONFIG_MYRI10GE_DCA
4077 myri10ge_setup_dca(mgp);
4079 pci_set_drvdata(pdev, mgp);
4080 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
4081 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
4082 if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
4083 myri10ge_initial_mtu = 68;
4085 netdev->netdev_ops = &myri10ge_netdev_ops;
4086 netdev->mtu = myri10ge_initial_mtu;
4087 netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
4089 /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
4090 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4092 netdev->features = netdev->hw_features;
4095 netdev->features |= NETIF_F_HIGHDMA;
4097 netdev->vlan_features |= mgp->features;
4098 if (mgp->fw_ver_tiny < 37)
4099 netdev->vlan_features &= ~NETIF_F_TSO6;
4100 if (mgp->fw_ver_tiny < 32)
4101 netdev->vlan_features &= ~NETIF_F_TSO;
4103 /* make sure we can get an irq, and that MSI can be
4104 * set up (if available). */
4105 status = myri10ge_request_irq(mgp);
4107 goto abort_with_slices;
4108 myri10ge_free_irq(mgp);
4110 /* Save configuration space to be restored if the
4111 * nic resets due to a parity error */
4112 pci_save_state(pdev);
4114 /* Setup the watchdog timer */
4115 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
4116 (unsigned long)mgp);
4118 netdev->ethtool_ops = &myri10ge_ethtool_ops;
4119 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
4120 status = register_netdev(netdev);
4122 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
4123 goto abort_with_state;
4125 if (mgp->msix_enabled)
4126 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
4127 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
4128 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
4130 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
4131 mgp->msi_enabled ? "MSI" : "xPIC",
4132 pdev->irq, mgp->tx_boundary, mgp->fw_name,
4133 (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
4139 pci_restore_state(pdev);
4142 myri10ge_free_slices(mgp);
4144 abort_with_firmware:
4145 myri10ge_dummy_rdma(mgp, 0);
4148 if (mgp->mac_addr_string != NULL)
4150 "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
4151 mgp->mac_addr_string, mgp->serial_number);
4155 arch_phys_wc_del(mgp->wc_cookie);
4156 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4157 mgp->cmd, mgp->cmd_bus);
4160 pci_disable_device(pdev);
4163 set_fw_name(mgp, NULL, false);
4164 free_netdev(netdev);
4171 * Does what is necessary to shut down one Myrinet device. Called
4172 * once for each Myrinet card by the kernel when a module is unloaded. */
4175 static void myri10ge_remove(struct pci_dev *pdev)
4177 struct myri10ge_priv *mgp;
4178 struct net_device *netdev;
4180 mgp = pci_get_drvdata(pdev);
4184 cancel_work_sync(&mgp->watchdog_work);
4186 unregister_netdev(netdev);
4188 #ifdef CONFIG_MYRI10GE_DCA
4189 myri10ge_teardown_dca(mgp);
4191 myri10ge_dummy_rdma(mgp, 0);
4193 /* avoid a memory leak */
4194 pci_restore_state(pdev);
4197 arch_phys_wc_del(mgp->wc_cookie);
4198 myri10ge_free_slices(mgp);
4199 kfree(mgp->msix_vectors);
4200 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4201 mgp->cmd, mgp->cmd_bus);
4203 set_fw_name(mgp, NULL, false);
4204 free_netdev(netdev);
4205 pci_disable_device(pdev);
4208 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4209 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4211 static const struct pci_device_id myri10ge_pci_tbl[] = {
4212 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4214 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
4218 MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
4220 static struct pci_driver myri10ge_driver = {
4222 .probe = myri10ge_probe,
4223 .remove = myri10ge_remove,
4224 .id_table = myri10ge_pci_tbl,
4226 .suspend = myri10ge_suspend,
4227 .resume = myri10ge_resume,
4231 #ifdef CONFIG_MYRI10GE_DCA
4233 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
4235 int err = driver_for_each_device(&myri10ge_driver.driver,
4237 myri10ge_notify_dca_device);
4244 static struct notifier_block myri10ge_dca_notifier = {
4245 .notifier_call = myri10ge_notify_dca,
4249 #endif /* CONFIG_MYRI10GE_DCA */
4251 static __init int myri10ge_init_module(void)
4253 pr_info("Version %s\n", MYRI10GE_VERSION_STR);
4255 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
4256 pr_err("Illegal rss hash type %d, defaulting to source port\n",
4258 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
4260 #ifdef CONFIG_MYRI10GE_DCA
4261 dca_register_notify(&myri10ge_dca_notifier);
4263 if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
4264 myri10ge_max_slices = MYRI10GE_MAX_SLICES;
4266 return pci_register_driver(&myri10ge_driver);
4269 module_init(myri10ge_init_module);
4271 static __exit void myri10ge_cleanup_module(void)
4273 #ifdef CONFIG_MYRI10GE_DCA
4274 dca_unregister_notify(&myri10ge_dca_notifier);
4276 pci_unregister_driver(&myri10ge_driver);
4279 module_exit(myri10ge_cleanup_module);