/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
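/*
 * For reference, CH_DEVICE(0x20, 0) below expands to
 *
 *	{ PCI_VENDOR_ID_CHELSIO, 0x20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
 *
 * so the trailing index lands in struct pci_device_id's driver_data slot,
 * presumably letting the probe routine (not shown in this excerpt) pick
 * the matching adapter-description entry.
 */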
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
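/*
 * Example (hypothetical invocation, not part of this file): to keep the
 * driver off MSI-X while still allowing plain MSI, load it with
 *
 *	modprobe cxgb3 msi=1
 */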
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
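/*
 * A minimal sketch of how such a private queue is set up (the actual
 * allocation lives in this driver's module init, which is not part of
 * this excerpt):
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 */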
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1Gbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}
static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY reporting the change
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff;	/* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
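/*
 * Worked example of the mapping built above: with nq0 = 2 and nq1 = 2,
 * the first half of rspq_map becomes 0,1,0,1,... and the second half
 * 2,3,2,3,..., so hash results for port 0 land only on queue sets 0-1
 * and those for port 1 only on queue sets 2-3.
 */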
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}
static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
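/*
 * E.g. CXGB3_ATTR_RW(nfilters, ...) below expands through CXGB3_SHOW to
 * format_nfilters() and show_nfilters(), then ties those and
 * store_nfilters together as dev_attr_nfilters via DEVICE_ATTR().
 */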
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
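/*
 * The rate arithmetic above, spelled out: the scheduler sends bpt bytes
 * every cpt core-clock ticks, and vpd.cclk is in kHz, so
 * v = cclk * 1000 / cpt is sends per second and v * bpt is bytes per
 * second; multiplying by 8/1000 (i.e. dividing by 125) yields Kbps.
 */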
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, pi->tx_chan);
			if (ret)
				err = ret;
		}
	}

	return err;
}
#define FW_FNAME "/*(DEBLOBBED)*/"
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check size, taking the checksum into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
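/*
 * The split in the loop above turns each 32-bit firmware word into two
 * 16-bit halves, high half first, so phy_cache ends up as a flat u16
 * array for the PHY-specific EDC loaders to consume (presumably as
 * register/value pairs - an assumption, since those consumers live
 * outside this file).
 */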
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = reject_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = reject_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port:
 *	any handler still running holds its queue's lock, so once we have
 *	briefly taken each lock, all in-flight handlers have finished.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
		(HZ * adap->params.linkpoll_period) / 10 :
		adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
		pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
		pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	if (fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
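/*
 * Note the arithmetic in reg_block_dump(): p starts at buf + start, so
 * each register's value is stored at an offset equal to its register
 * address, and the blocks dumped in get_regs() below never overlap even
 * though they share a single T3_REGMAP_SIZE buffer.
 */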
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int set_phys_id(struct net_device *dev,
		       enum ethtool_phys_id_state state)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_OFF:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
		break;

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_INACTIVE:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 F_GPIO0_OUT_VAL);
	}

	return 0;
}
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	u32 supported;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						p->link_config.supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						p->link_config.advertising);

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy.mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->base.autoneg == AUTONEG_DISABLE) {
			u32 speed = cmd->base.speed;
			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp;
	struct sge_qset *qs;
	int i;

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	for (i = 0; i < pi->nqsets; i++) {
		qsp = &adapter->params.sge.qset[i];
		qs = &adapter->sge.qs[i];
		qsp->coalesce_usecs = c->rx_coalesce_usecs;
		t3_update_qset_coalesce(qs, qsp);
	}

	return 0;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
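	/*
	 * Example of the rounding above: a write of 6 bytes at offset 5
	 * yields aligned_offset = 4 and aligned_len = 8, i.e. the partial
	 * words at either end are read back first and merged below so that
	 * only whole 32-bit words are ever written to the EEPROM.
	 */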
	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = set_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
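/*
 * The asymmetry in in_range() is deliberate: the Chelsio ioctls below use
 * negative values to mean "parameter not supplied", so a negative val
 * passes any range check and the corresponding field is left unchanged.
 */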
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
			return -EINVAL;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}

		if (t.lro >= 0) {
			if (t.lro)
				dev->wanted_features |= NETIF_F_GRO;
			else
				dev->wanted_features &= ~NETIF_F_GRO;
			netdev_update_features(dev);
		}

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
			return -EINVAL;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;
		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = !!(dev->features & NETIF_F_GRO);
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.cmd != CHELSIO_SET_QSET_NUM)
			return -EINVAL;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_LOAD_FW)
			return -EINVAL;
		/* Check t.len sanity ? */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SETMTUTAB)
			return -EINVAL;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
2402 case CHELSIO_SET_PM:{
2404 struct tp_params *p = &adapter->params.tp;
2406 if (!is_offload(adapter))
2408 if (!capable(CAP_NET_ADMIN))
2410 if (adapter->flags & FULL_INIT_DONE)
2412 if (copy_from_user(&m, useraddr, sizeof(m)))
2414 if (m.cmd != CHELSIO_SET_PM)
2416 if (!is_power_of_2(m.rx_pg_sz) ||
2417 !is_power_of_2(m.tx_pg_sz))
2418 return -EINVAL; /* not power of 2 */
2419 if (!(m.rx_pg_sz & 0x14000))
2420 return -EINVAL; /* not 16KB or 64KB */
if (!(m.tx_pg_sz & 0x1554000))
return -EINVAL; /* not a power of 4 between 16KB and 16MB */
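/*
 * Editor's note: combined with the power-of-2 test above, these masks
 * whitelist exact sizes: 0x14000 has bits 14 and 16 set, so rx pages
 * must be 16KB or 64KB; 0x1554000 has bits 14, 16, 18, 20, 22 and 24
 * set, so tx pages must be a power of 4 from 16KB to 16MB.
 */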
2423 if (m.tx_num_pg == -1)
2424 m.tx_num_pg = p->tx_num_pgs;
2425 if (m.rx_num_pg == -1)
2426 m.rx_num_pg = p->rx_num_pgs;
2427 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2429 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2430 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2432 p->rx_pg_size = m.rx_pg_sz;
2433 p->tx_pg_size = m.tx_pg_sz;
2434 p->rx_num_pgs = m.rx_num_pg;
2435 p->tx_num_pgs = m.tx_num_pg;
2438 case CHELSIO_GET_MEM:{
2439 struct ch_mem_range t;
2443 if (!is_offload(adapter))
2445 if (!capable(CAP_NET_ADMIN))
2447 if (!(adapter->flags & FULL_INIT_DONE))
2448 return -EIO; /* need the memory controllers */
2449 if (copy_from_user(&t, useraddr, sizeof(t)))
2451 if (t.cmd != CHELSIO_GET_MEM)
2453 if ((t.addr & 7) || (t.len & 7))
if (t.mem_id == MEM_CM)
mem = &adapter->cm;
2457 else if (t.mem_id == MEM_PMRX)
2458 mem = &adapter->pmrx;
else if (t.mem_id == MEM_PMTX)
mem = &adapter->pmtx;
else
return -EINVAL;
* Version scheme:
* bits 0..9: chip version
2467 * bits 10..15: chip revision
2469 t.version = 3 | (adapter->params.rev << 10);
2470 if (copy_to_user(useraddr, &t, sizeof(t)))
2474 * Read 256 bytes at a time as len can be large and we don't
2475 * want to use huge intermediate buffers.
2477 useraddr += sizeof(t); /* advance to start of buffer */
2479 unsigned int chunk =
2480 min_t(unsigned int, t.len, sizeof(buf));
2483 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
if (copy_to_user(useraddr, buf, chunk))
return -EFAULT;
useraddr += chunk;
t.addr += chunk;
t.len -= chunk;
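/*
 * Editor's note: the MC7 backdoor interface moves whole 64-bit words,
 * which is why t.addr and t.len must be multiples of 8 (checked above)
 * and why t3_mc7_bd_read() is given word counts (addr / 8, chunk / 8)
 * rather than byte counts.
 */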
2495 case CHELSIO_SET_TRACE_FILTER:{
2497 const struct trace_params *tp;
2499 if (!capable(CAP_NET_ADMIN))
2501 if (!offload_running(adapter))
2503 if (copy_from_user(&t, useraddr, sizeof(t)))
2505 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2508 tp = (const struct trace_params *)&t.sip;
2510 t3_config_trace_filter(adapter, tp, 0,
2514 t3_config_trace_filter(adapter, tp, 1,
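/*
 * Editor's note: the ch_trace fields starting at t.sip are expected to
 * be laid out exactly like struct trace_params, so the cast avoids a
 * copy; the same parameters are then programmed into both hardware
 * trace filters (index 0 and index 1), presumably one per traffic
 * direction.
 */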
2525 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2527 struct mii_ioctl_data *data = if_mii(req);
2528 struct port_info *pi = netdev_priv(dev);
2529 struct adapter *adapter = pi->adapter;
2534 /* Convert phy_id from older PRTAD/DEVAD format */
2535 if (is_10G(adapter) &&
2536 !mdio_phy_id_is_c45(data->phy_id) &&
2537 (data->phy_id & 0x1f00) &&
2538 !(data->phy_id & 0xe0e0))
2539 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2540 data->phy_id & 0x1f);
2543 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2545 return cxgb_extension_ioctl(dev, req->ifr_data);
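/*
 * Editor's note on the phy_id conversion above: the legacy format packs
 * PRTAD in bits 8..12 and DEVAD in bits 0..4, hence the test that bits
 * 8..12 are non-zero while bits 5..7 and 13..15 (mask 0xe0e0) are
 * clear; mdio_phy_id_c45() then repacks the pair into the clause-45
 * encoding that mdio_mii_ioctl() expects.
 */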
2551 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2553 struct port_info *pi = netdev_priv(dev);
2554 struct adapter *adapter = pi->adapter;
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
dev->mtu = new_mtu;
2560 init_port_mtus(adapter);
2561 if (adapter->params.rev == 0 && offload_running(adapter))
2562 t3_load_mtus(adapter, adapter->params.mtus,
2563 adapter->params.a_wnd, adapter->params.b_wnd,
2564 adapter->port[0]->mtu);
2568 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2570 struct port_info *pi = netdev_priv(dev);
2571 struct adapter *adapter = pi->adapter;
2572 struct sockaddr *addr = p;
2574 if (!is_valid_ether_addr(addr->sa_data))
2575 return -EADDRNOTAVAIL;
2577 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2578 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2579 if (offload_running(adapter))
2580 write_smt_entry(adapter, pi->port_id);
2584 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2585 netdev_features_t features)
* Since there is no support for separate rx/tx vlan accel
* enable/disable, make sure the tx flag is always in the same state
* as the rx flag.
2591 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2592 features |= NETIF_F_HW_VLAN_CTAG_TX;
2594 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2599 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2601 netdev_features_t changed = dev->features ^ features;
2603 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2604 cxgb_vlan_mode(dev, features);
2609 #ifdef CONFIG_NET_POLL_CONTROLLER
2610 static void cxgb_netpoll(struct net_device *dev)
2612 struct port_info *pi = netdev_priv(dev);
2613 struct adapter *adapter = pi->adapter;
2616 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2617 struct sge_qset *qs = &adapter->sge.qs[qidx];
2620 if (adapter->flags & USING_MSIX)
2625 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
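/*
 * Editor's note: t3_intr_handler() evaluates to the irq handler that
 * matches the adapter's interrupt mode, and netpoll invokes it here
 * directly for each of the port's queue sets so pending responses are
 * drained even while interrupts are unavailable.
 */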
2631 * Periodic accumulation of MAC statistics.
2633 static void mac_stats_update(struct adapter *adapter)
2637 for_each_port(adapter, i) {
2638 struct net_device *dev = adapter->port[i];
2639 struct port_info *p = netdev_priv(dev);
2641 if (netif_running(dev)) {
2642 spin_lock(&adapter->stats_lock);
2643 t3_mac_update_stats(&p->mac);
2644 spin_unlock(&adapter->stats_lock);
2649 static void check_link_status(struct adapter *adapter)
2653 for_each_port(adapter, i) {
2654 struct net_device *dev = adapter->port[i];
2655 struct port_info *p = netdev_priv(dev);
2658 spin_lock_irq(&adapter->work_lock);
2659 link_fault = p->link_fault;
2660 spin_unlock_irq(&adapter->work_lock);
2663 t3_link_fault(adapter, i);
2667 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2668 t3_xgm_intr_disable(adapter, i);
2669 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2671 t3_link_changed(adapter, i);
2672 t3_xgm_intr_enable(adapter, i);
2677 static void check_t3b2_mac(struct adapter *adapter)
2681 if (!rtnl_trylock()) /* synchronize with ifdown */
2684 for_each_port(adapter, i) {
2685 struct net_device *dev = adapter->port[i];
2686 struct port_info *p = netdev_priv(dev);
2689 if (!netif_running(dev))
2693 if (netif_running(dev) && netif_carrier_ok(dev))
2694 status = t3b2_mac_watchdog_task(&p->mac);
2696 p->mac.stats.num_toggled++;
2697 else if (status == 2) {
2698 struct cmac *mac = &p->mac;
2700 t3_mac_set_mtu(mac, dev->mtu);
2701 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2702 cxgb_set_rxmode(dev);
2703 t3_link_start(&p->phy, mac, &p->link_config);
2704 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2705 t3_port_intr_enable(adapter, p->port_id);
2706 p->mac.stats.num_resets++;
2713 static void t3_adap_check_task(struct work_struct *work)
2715 struct adapter *adapter = container_of(work, struct adapter,
2716 adap_check_task.work);
2717 const struct adapter_params *p = &adapter->params;
2719 unsigned int v, status, reset;
2721 adapter->check_task_cnt++;
2723 check_link_status(adapter);
2725 /* Accumulate MAC stats if needed */
2726 if (!p->linkpoll_period ||
2727 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2728 p->stats_update_period) {
2729 mac_stats_update(adapter);
2730 adapter->check_task_cnt = 0;
2733 if (p->rev == T3_REV_B2)
2734 check_t3b2_mac(adapter);
* Scan the XGMACs for conditions we want to monitor by periodic
* polling rather than via interrupts: such conditions would otherwise
* flood the system with interrupts, and we only really need to know
* that they are happening. For each condition we count each detection
* and clear the cause for the next polling pass.
2744 for_each_port(adapter, port) {
2745 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2748 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2750 if (cause & F_RXFIFO_OVERFLOW) {
2751 mac->stats.rx_fifo_ovfl++;
2752 reset |= F_RXFIFO_OVERFLOW;
2755 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2759 * We do the same as above for FL_EMPTY interrupts.
2761 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2764 if (status & F_FLEMPTY) {
2765 struct sge_qset *qs = &adapter->sge.qs[0];
2770 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2774 qs->fl[i].empty += (v & 1);
2782 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2784 /* Schedule the next check update if any port is active. */
2785 spin_lock_irq(&adapter->work_lock);
2786 if (adapter->open_device_map & PORT_MASK)
2787 schedule_chk_task(adapter);
2788 spin_unlock_irq(&adapter->work_lock);
2791 static void db_full_task(struct work_struct *work)
2793 struct adapter *adapter = container_of(work, struct adapter,
2796 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2799 static void db_empty_task(struct work_struct *work)
2801 struct adapter *adapter = container_of(work, struct adapter,
2804 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2807 static void db_drop_task(struct work_struct *work)
2809 struct adapter *adapter = container_of(work, struct adapter,
2811 unsigned long delay = 1000;
2814 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
* Sleep a while before ringing the driver qset doorbells.
* The delay is between 1000 and 2023 usecs.
get_random_bytes(&r, 2);
delay += (r & 1023);
2822 set_current_state(TASK_UNINTERRUPTIBLE);
2823 schedule_timeout(usecs_to_jiffies(delay));
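/*
 * Editor's note: with delay starting at 1000 and r holding two random
 * bytes, delay += (r & 1023) yields a value uniform in [1000, 2023]
 * usecs, presumably so doorbell recovery is not synchronized across
 * queues or adapters.
 */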
2828 * Processes external (PHY) interrupts in process context.
2830 static void ext_intr_task(struct work_struct *work)
2832 struct adapter *adapter = container_of(work, struct adapter,
2833 ext_intr_handler_task);
2836 /* Disable link fault interrupts */
2837 for_each_port(adapter, i) {
2838 struct net_device *dev = adapter->port[i];
2839 struct port_info *p = netdev_priv(dev);
2841 t3_xgm_intr_disable(adapter, i);
2842 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2845 /* Re-enable link fault interrupts */
2846 t3_phy_intr_handler(adapter);
2848 for_each_port(adapter, i)
2849 t3_xgm_intr_enable(adapter, i);
2851 /* Now reenable external interrupts */
2852 spin_lock_irq(&adapter->work_lock);
2853 if (adapter->slow_intr_mask) {
2854 adapter->slow_intr_mask |= F_T3DBG;
2855 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2856 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2857 adapter->slow_intr_mask);
2859 spin_unlock_irq(&adapter->work_lock);
2863 * Interrupt-context handler for external (PHY) interrupts.
2865 void t3_os_ext_intr_handler(struct adapter *adapter)
2868 * Schedule a task to handle external interrupts as they may be slow
2869 * and we use a mutex to protect MDIO registers. We disable PHY
* interrupts in the meantime and let the task reenable them when
* it's done.
2873 spin_lock(&adapter->work_lock);
2874 if (adapter->slow_intr_mask) {
2875 adapter->slow_intr_mask &= ~F_T3DBG;
2876 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2877 adapter->slow_intr_mask);
2878 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2880 spin_unlock(&adapter->work_lock);
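/*
 * Editor's note: the handler above drops F_T3DBG from the slow-path
 * interrupt mask before queueing ext_intr_task(), and the task adds it
 * back (after acking A_PL_INT_CAUSE0) once the slow MDIO work is done;
 * work_lock serializes both updates of slow_intr_mask.
 */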
2883 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2885 struct net_device *netdev = adapter->port[port_id];
2886 struct port_info *pi = netdev_priv(netdev);
2888 spin_lock(&adapter->work_lock);
2890 spin_unlock(&adapter->work_lock);
2893 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2897 if (is_offload(adapter) &&
2898 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2899 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2900 offload_close(&adapter->tdev);
2903 /* Stop all ports */
2904 for_each_port(adapter, i) {
2905 struct net_device *netdev = adapter->port[i];
2907 if (netif_running(netdev))
2908 __cxgb_close(netdev, on_wq);
2911 /* Stop SGE timers */
2912 t3_stop_sge_timers(adapter);
2914 adapter->flags &= ~FULL_INIT_DONE;
2917 ret = t3_reset_adapter(adapter);
2919 pci_disable_device(adapter->pdev);
2924 static int t3_reenable_adapter(struct adapter *adapter)
2926 if (pci_enable_device(adapter->pdev)) {
2927 dev_err(&adapter->pdev->dev,
2928 "Cannot re-enable PCI device after reset.\n");
2931 pci_set_master(adapter->pdev);
2932 pci_restore_state(adapter->pdev);
2933 pci_save_state(adapter->pdev);
2935 /* Free sge resources */
2936 t3_free_sge_resources(adapter);
2938 if (t3_replay_prep_adapter(adapter))
2946 static void t3_resume_ports(struct adapter *adapter)
2950 /* Restart the ports */
2951 for_each_port(adapter, i) {
2952 struct net_device *netdev = adapter->port[i];
2954 if (netif_running(netdev)) {
2955 if (cxgb_open(netdev)) {
2956 dev_err(&adapter->pdev->dev,
2957 "can't bring device back up"
2964 if (is_offload(adapter) && !ofld_disable)
2965 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
* Processes a fatal error.
2970 * Bring the ports down, reset the chip, bring the ports back up.
2972 static void fatal_error_task(struct work_struct *work)
2974 struct adapter *adapter = container_of(work, struct adapter,
2975 fatal_error_handler_task);
2979 err = t3_adapter_error(adapter, 1, 1);
2981 err = t3_reenable_adapter(adapter);
2983 t3_resume_ports(adapter);
2985 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2989 void t3_fatal_err(struct adapter *adapter)
2991 unsigned int fw_status[4];
2993 if (adapter->flags & FULL_INIT_DONE) {
2994 t3_sge_stop(adapter);
2995 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2996 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2997 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2998 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3000 spin_lock(&adapter->work_lock);
3001 t3_intr_disable(adapter);
3002 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3003 spin_unlock(&adapter->work_lock);
3005 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3006 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3007 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3008 fw_status[0], fw_status[1],
3009 fw_status[2], fw_status[3]);
3013 * t3_io_error_detected - called when PCI error is detected
3014 * @pdev: Pointer to PCI device
3015 * @state: The current pci connection state
3017 * This function is called after a PCI bus error affecting
3018 * this device has been detected.
3020 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3021 pci_channel_state_t state)
3023 struct adapter *adapter = pci_get_drvdata(pdev);
3025 if (state == pci_channel_io_perm_failure)
3026 return PCI_ERS_RESULT_DISCONNECT;
3028 t3_adapter_error(adapter, 0, 0);
3030 /* Request a slot reset. */
3031 return PCI_ERS_RESULT_NEED_RESET;
3035 * t3_io_slot_reset - called after the pci bus has been reset.
3036 * @pdev: Pointer to PCI device
3038 * Restart the card from scratch, as if from a cold-boot.
3040 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3042 struct adapter *adapter = pci_get_drvdata(pdev);
3044 if (!t3_reenable_adapter(adapter))
3045 return PCI_ERS_RESULT_RECOVERED;
3047 return PCI_ERS_RESULT_DISCONNECT;
3051 * t3_io_resume - called when traffic can start flowing again.
3052 * @pdev: Pointer to PCI device
3054 * This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation.
3057 static void t3_io_resume(struct pci_dev *pdev)
3059 struct adapter *adapter = pci_get_drvdata(pdev);
3061 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3062 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3065 t3_resume_ports(adapter);
3069 static const struct pci_error_handlers t3_err_handler = {
3070 .error_detected = t3_io_error_detected,
3071 .slot_reset = t3_io_slot_reset,
3072 .resume = t3_io_resume,
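/*
 * Editor's note: together these callbacks implement the standard PCI
 * error recovery sequence: error_detected() quiesces the adapter and
 * requests a slot reset, slot_reset() re-enables the device and
 * replays its state, and resume() brings the ports back up once the
 * core declares recovery complete.
 */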
3076 * Set the number of qsets based on the number of CPUs and the number of ports,
* not to exceed the number of available qsets, assuming there are enough qsets
* per port in HW.
3080 static void set_nqsets(struct adapter *adap)
3083 int num_cpus = netif_get_num_default_rss_queues();
3084 int hwports = adap->params.nports;
3085 int nqsets = adap->msix_nvectors - 1;
3087 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3089 (hwports * nqsets > SGE_QSETS ||
3090 num_cpus >= nqsets / hwports))
3092 if (nqsets > num_cpus)
3094 if (nqsets < 1 || hwports == 4)
3099 for_each_port(adap, i) {
3100 struct port_info *pi = adap2pinfo(adap, i);
3103 pi->nqsets = nqsets;
3104 j = pi->first_qset + nqsets;
3106 dev_info(&adap->pdev->dev,
3107 "Port %d using %d queue sets.\n", i, nqsets);
3111 static int cxgb_enable_msix(struct adapter *adap)
3113 struct msix_entry entries[SGE_QSETS + 1];
3117 vectors = ARRAY_SIZE(entries);
3118 for (i = 0; i < vectors; ++i)
3119 entries[i].entry = i;
3121 vectors = pci_enable_msix_range(adap->pdev, entries,
3122 adap->params.nports + 1, vectors);
3126 for (i = 0; i < vectors; ++i)
3127 adap->msix_info[i].vec = entries[i].vector;
3128 adap->msix_nvectors = vectors;
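/*
 * Editor's note: the range request accepts anywhere from nports + 1 up
 * to SGE_QSETS + 1 vectors: one slow-path vector plus at least one
 * qset vector per port; set_nqsets() later spreads the granted
 * msix_nvectors - 1 qset vectors across the ports.
 */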
3133 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3135 static const char *pci_variant[] = {
3136 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3143 snprintf(buf, sizeof(buf), "%s x%d",
3144 pci_variant[adap->params.pci.variant],
3145 adap->params.pci.width);
3147 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3148 pci_variant[adap->params.pci.variant],
3149 adap->params.pci.speed, adap->params.pci.width);
3151 for_each_port(adap, i) {
3152 struct net_device *dev = adap->port[i];
3153 const struct port_info *pi = netdev_priv(dev);
3155 if (!test_bit(i, &adap->registered_device_map))
3157 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3158 ai->desc, pi->phy.desc,
3159 is_offload(adap) ? "R" : "", adap->params.rev, buf,
3160 (adap->flags & USING_MSIX) ? " MSI-X" :
3161 (adap->flags & USING_MSI) ? " MSI" : "");
3162 if (adap->name == dev->name && adap->params.vpd.mclk)
3163 pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3164 adap->name, t3_mc7_size(&adap->cm) >> 20,
3165 t3_mc7_size(&adap->pmtx) >> 20,
3166 t3_mc7_size(&adap->pmrx) >> 20,
3167 adap->params.vpd.sn);
3171 static const struct net_device_ops cxgb_netdev_ops = {
3172 .ndo_open = cxgb_open,
3173 .ndo_stop = cxgb_close,
3174 .ndo_start_xmit = t3_eth_xmit,
3175 .ndo_get_stats = cxgb_get_stats,
3176 .ndo_validate_addr = eth_validate_addr,
3177 .ndo_set_rx_mode = cxgb_set_rxmode,
3178 .ndo_do_ioctl = cxgb_ioctl,
3179 .ndo_change_mtu = cxgb_change_mtu,
3180 .ndo_set_mac_address = cxgb_set_mac_addr,
3181 .ndo_fix_features = cxgb_fix_features,
3182 .ndo_set_features = cxgb_set_features,
3183 #ifdef CONFIG_NET_POLL_CONTROLLER
3184 .ndo_poll_controller = cxgb_netpoll,
3188 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3190 struct port_info *pi = netdev_priv(dev);
3192 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3193 pi->iscsic.mac_addr[3] |= 0x80;
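/*
 * Editor's note: the iSCSI function reuses the port's MAC address with
 * bit 7 of byte 3 forced on, yielding a distinct unicast address,
 * presumably so iSCSI traffic can be steered separately from the LAN
 * MAC.
 */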
3196 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3197 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3198 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3199 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3201 int i, err, pci_using_dac = 0;
3202 resource_size_t mmio_start, mmio_len;
3203 const struct adapter_info *ai;
3204 struct adapter *adapter = NULL;
3205 struct port_info *pi;
3207 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3210 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3212 pr_err("cannot initialize work queue\n");
3217 err = pci_enable_device(pdev);
3219 dev_err(&pdev->dev, "cannot enable PCI device\n");
3223 err = pci_request_regions(pdev, DRV_NAME);
3225 /* Just info, some other driver may have claimed the device. */
3226 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3227 goto out_disable_device;
3230 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3232 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3234 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3235 "coherent allocations\n");
3236 goto out_release_regions;
3238 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3239 dev_err(&pdev->dev, "no usable DMA configuration\n");
3240 goto out_release_regions;
3243 pci_set_master(pdev);
3244 pci_save_state(pdev);
3246 mmio_start = pci_resource_start(pdev, 0);
3247 mmio_len = pci_resource_len(pdev, 0);
3248 ai = t3_get_adapter_info(ent->driver_data);
3250 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3253 goto out_release_regions;
3256 adapter->nofail_skb =
3257 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3258 if (!adapter->nofail_skb) {
3259 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3261 goto out_free_adapter;
3264 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3265 if (!adapter->regs) {
3266 dev_err(&pdev->dev, "cannot map device registers\n");
3268 goto out_free_adapter_nofail;
3271 adapter->pdev = pdev;
3272 adapter->name = pci_name(pdev);
3273 adapter->msg_enable = dflt_msg_enable;
3274 adapter->mmio_len = mmio_len;
3276 mutex_init(&adapter->mdio_lock);
3277 spin_lock_init(&adapter->work_lock);
3278 spin_lock_init(&adapter->stats_lock);
3280 INIT_LIST_HEAD(&adapter->adapter_list);
3281 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3282 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3284 INIT_WORK(&adapter->db_full_task, db_full_task);
3285 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3286 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3288 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3290 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3291 struct net_device *netdev;
3293 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3299 SET_NETDEV_DEV(netdev, &pdev->dev);
3301 adapter->port[i] = netdev;
3302 pi = netdev_priv(netdev);
3303 pi->adapter = adapter;
3305 netif_carrier_off(netdev);
3306 netdev->irq = pdev->irq;
3307 netdev->mem_start = mmio_start;
3308 netdev->mem_end = mmio_start + mmio_len - 1;
3309 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3310 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3311 netdev->features |= netdev->hw_features |
3312 NETIF_F_HW_VLAN_CTAG_TX;
3313 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3315 netdev->features |= NETIF_F_HIGHDMA;
3317 netdev->netdev_ops = &cxgb_netdev_ops;
3318 netdev->ethtool_ops = &cxgb_ethtool_ops;
3319 netdev->min_mtu = 81;
3320 netdev->max_mtu = ETH_MAX_MTU;
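/*
 * Editor's note: min_mtu of 81 mirrors the SACK-driven lower bound
 * enforced on mtus[0] by CHELSIO_SETMTUTAB above, keeping the core's
 * MTU validation consistent with the congestion-table constraint.
 */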
3323 pci_set_drvdata(pdev, adapter);
3324 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3330 * The card is now ready to go. If any errors occur during device
3331 * registration we do not fail the whole card but rather proceed only
3332 * with the ports we manage to register successfully. However we must
3333 * register at least one net device.
3335 for_each_port(adapter, i) {
3336 err = register_netdev(adapter->port[i]);
3338 dev_warn(&pdev->dev,
3339 "cannot register net device %s, skipping\n",
3340 adapter->port[i]->name);
3343 * Change the name we use for messages to the name of
3344 * the first successfully registered interface.
3346 if (!adapter->registered_device_map)
3347 adapter->name = adapter->port[i]->name;
3349 __set_bit(i, &adapter->registered_device_map);
3352 if (!adapter->registered_device_map) {
3353 dev_err(&pdev->dev, "could not register any net devices\n");
3357 for_each_port(adapter, i)
3358 cxgb3_init_iscsi_mac(adapter->port[i]);
3360 /* Driver's ready. Reflect it on LEDs */
3361 t3_led_ready(adapter);
3363 if (is_offload(adapter)) {
3364 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3365 cxgb3_adapter_ofld(adapter);
3368 /* See what interrupts we'll be using */
3369 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3370 adapter->flags |= USING_MSIX;
3371 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3372 adapter->flags |= USING_MSI;
3374 set_nqsets(adapter);
3376 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3379 print_port_info(adapter, ai);
3383 iounmap(adapter->regs);
3384 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3385 if (adapter->port[i])
3386 free_netdev(adapter->port[i]);
3388 out_free_adapter_nofail:
3389 kfree_skb(adapter->nofail_skb);
3394 out_release_regions:
3395 pci_release_regions(pdev);
3397 pci_disable_device(pdev);
3402 static void remove_one(struct pci_dev *pdev)
3404 struct adapter *adapter = pci_get_drvdata(pdev);
3409 t3_sge_stop(adapter);
3410 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3413 if (is_offload(adapter)) {
3414 cxgb3_adapter_unofld(adapter);
3415 if (test_bit(OFFLOAD_DEVMAP_BIT,
3416 &adapter->open_device_map))
3417 offload_close(&adapter->tdev);
3420 for_each_port(adapter, i)
3421 if (test_bit(i, &adapter->registered_device_map))
3422 unregister_netdev(adapter->port[i]);
3424 t3_stop_sge_timers(adapter);
3425 t3_free_sge_resources(adapter);
3426 cxgb_disable_msi(adapter);
3428 for_each_port(adapter, i)
3429 if (adapter->port[i])
3430 free_netdev(adapter->port[i]);
3432 iounmap(adapter->regs);
3433 if (adapter->nofail_skb)
3434 kfree_skb(adapter->nofail_skb);
3436 pci_release_regions(pdev);
3437 pci_disable_device(pdev);
3441 static struct pci_driver driver = {
3443 .id_table = cxgb3_pci_tbl,
3445 .remove = remove_one,
3446 .err_handler = &t3_err_handler,
3449 static int __init cxgb3_init_module(void)
3453 cxgb3_offload_init();
3455 ret = pci_register_driver(&driver);
3459 static void __exit cxgb3_cleanup_module(void)
3461 pci_unregister_driver(&driver);
3463 destroy_workqueue(cxgb3_wq);
3466 module_init(cxgb3_init_module);
3467 module_exit(cxgb3_cleanup_module);