2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/pci.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/if_vlan.h>
42 #include <linux/mdio.h>
43 #include <linux/sockios.h>
44 #include <linux/workqueue.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/firmware.h>
48 #include <linux/log2.h>
49 #include <linux/stringify.h>
50 #include <linux/sched.h>
51 #include <linux/slab.h>
52 #include <linux/uaccess.h>
53 #include <linux/nospec.h>
56 #include "cxgb3_ioctl.h"
58 #include "cxgb3_offload.h"
61 #include "cxgb3_ctl_defs.h"
63 #include "firmware_exports.h"
/* Upper/lower bounds for the SGE queue sizes a user may configure. */
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
77 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
79 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
83 #define EEPROM_MAGIC 0x38E2F10C
85 #define CH_DEVICE(devid, idx) \
86 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
88 static const struct pci_device_id cxgb3_pci_tbl[] = {
89 CH_DEVICE(0x20, 0), /* PE9000 */
90 CH_DEVICE(0x21, 1), /* T302E */
91 CH_DEVICE(0x22, 2), /* T310E */
92 CH_DEVICE(0x23, 3), /* T320X */
93 CH_DEVICE(0x24, 1), /* T302X */
94 CH_DEVICE(0x25, 3), /* T320E */
95 CH_DEVICE(0x26, 2), /* T310X */
96 CH_DEVICE(0x30, 2), /* T3B10 */
97 CH_DEVICE(0x31, 3), /* T3B20 */
98 CH_DEVICE(0x32, 1), /* T3B02 */
99 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
100 CH_DEVICE(0x36, 3), /* S320E-CR */
101 CH_DEVICE(0x37, 7), /* N320E-G2 */
105 MODULE_DESCRIPTION(DRV_DESC);
106 MODULE_AUTHOR("Chelsio Communications");
107 MODULE_LICENSE("Dual BSD/GPL");
108 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
110 static int dflt_msg_enable = DFLT_MSG_ENABLE;
112 module_param(dflt_msg_enable, int, 0644);
113 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
116 * The driver uses the best interrupt scheme available on a platform in the
117 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
118 * of these schemes the driver may consider as follows:
120 * msi = 2: choose from among all three options
121 * msi = 1: only consider MSI and pin interrupts
122 * msi = 0: force pin interrupts
126 module_param(msi, int, 0644);
127 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
130 * The driver enables offload as a default.
131 * To disable it, use ofld_disable = 1.
134 static int ofld_disable = 0;
136 module_param(ofld_disable, int, 0644);
137 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140 * We have work elements that we need to cancel when an interface is taken
141 * down. Normally the work elements would be executed by keventd but that
142 * can deadlock because of linkwatch. If our close method takes the rtnl
143 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
144 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
145 * for our work to complete. Get our own work queue to solve this.
147 struct workqueue_struct *cxgb3_wq;
150 * link_report - show link status and link speed/duplex
151 * @dev: the port whose settings are to be reported
153 * Shows the link status, speed, and duplex of a port.
155 static void link_report(struct net_device *dev)
157 if (!netif_carrier_ok(dev))
158 netdev_info(dev, "link down\n");
160 const char *s = "10Mbps";
161 const struct port_info *p = netdev_priv(dev);
163 switch (p->link_config.speed) {
175 netdev_info(dev, "link up, %s, %s-duplex\n",
176 s, p->link_config.duplex == DUPLEX_FULL
181 static void enable_tx_fifo_drain(struct adapter *adapter,
182 struct port_info *pi)
184 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
186 t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
187 t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
188 t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
191 static void disable_tx_fifo_drain(struct adapter *adapter,
192 struct port_info *pi)
194 t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
198 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
200 struct net_device *dev = adap->port[port_id];
201 struct port_info *pi = netdev_priv(dev);
203 if (state == netif_carrier_ok(dev))
207 struct cmac *mac = &pi->mac;
209 netif_carrier_on(dev);
211 disable_tx_fifo_drain(adap, pi);
213 /* Clear local faults */
214 t3_xgm_intr_disable(adap, pi->port_id);
215 t3_read_reg(adap, A_XGM_INT_STATUS +
218 A_XGM_INT_CAUSE + pi->mac.offset,
221 t3_set_reg_field(adap,
224 F_XGM_INT, F_XGM_INT);
225 t3_xgm_intr_enable(adap, pi->port_id);
227 t3_mac_enable(mac, MAC_DIRECTION_TX);
229 netif_carrier_off(dev);
232 enable_tx_fifo_drain(adap, pi);
238 * t3_os_link_changed - handle link status changes
239 * @adapter: the adapter associated with the link change
240 * @port_id: the port index whose limk status has changed
241 * @link_stat: the new status of the link
242 * @speed: the new speed setting
243 * @duplex: the new duplex setting
244 * @pause: the new flow-control setting
246 * This is the OS-dependent handler for link status changes. The OS
247 * neutral handler takes care of most of the processing for these events,
248 * then calls this handler for any OS-specific processing.
250 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
251 int speed, int duplex, int pause)
253 struct net_device *dev = adapter->port[port_id];
254 struct port_info *pi = netdev_priv(dev);
255 struct cmac *mac = &pi->mac;
257 /* Skip changes from disabled ports. */
258 if (!netif_running(dev))
261 if (link_stat != netif_carrier_ok(dev)) {
263 disable_tx_fifo_drain(adapter, pi);
265 t3_mac_enable(mac, MAC_DIRECTION_RX);
267 /* Clear local faults */
268 t3_xgm_intr_disable(adapter, pi->port_id);
269 t3_read_reg(adapter, A_XGM_INT_STATUS +
271 t3_write_reg(adapter,
272 A_XGM_INT_CAUSE + pi->mac.offset,
275 t3_set_reg_field(adapter,
276 A_XGM_INT_ENABLE + pi->mac.offset,
277 F_XGM_INT, F_XGM_INT);
278 t3_xgm_intr_enable(adapter, pi->port_id);
280 netif_carrier_on(dev);
282 netif_carrier_off(dev);
284 t3_xgm_intr_disable(adapter, pi->port_id);
285 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
286 t3_set_reg_field(adapter,
287 A_XGM_INT_ENABLE + pi->mac.offset,
291 pi->phy.ops->power_down(&pi->phy, 1);
293 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
294 t3_mac_disable(mac, MAC_DIRECTION_RX);
295 t3_link_start(&pi->phy, mac, &pi->link_config);
298 enable_tx_fifo_drain(adapter, pi);
306 * t3_os_phymod_changed - handle PHY module changes
307 * @adap: the adapter associated with the link change
308 * @port_id: the port index whose limk status has changed
310 * This is the OS-dependent handler for PHY module changes. It is
311 * invoked when a PHY module is removed or inserted for any OS-specific
314 void t3_os_phymod_changed(struct adapter *adap, int port_id)
316 static const char *mod_str[] = {
317 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
320 const struct net_device *dev = adap->port[port_id];
321 const struct port_info *pi = netdev_priv(dev);
323 if (pi->phy.modtype == phy_modtype_none)
324 netdev_info(dev, "PHY module unplugged\n");
326 netdev_info(dev, "%s PHY module inserted\n",
327 mod_str[pi->phy.modtype]);
330 static void cxgb_set_rxmode(struct net_device *dev)
332 struct port_info *pi = netdev_priv(dev);
334 t3_mac_set_rx_mode(&pi->mac, dev);
338 * link_start - enable a port
339 * @dev: the device to enable
341 * Performs the MAC and PHY actions needed to enable a port.
343 static void link_start(struct net_device *dev)
345 struct port_info *pi = netdev_priv(dev);
346 struct cmac *mac = &pi->mac;
349 t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
350 t3_mac_set_mtu(mac, dev->mtu);
351 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
352 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
353 t3_mac_set_rx_mode(mac, dev);
354 t3_link_start(&pi->phy, mac, &pi->link_config);
355 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
358 static inline void cxgb_disable_msi(struct adapter *adapter)
360 if (adapter->flags & USING_MSIX) {
361 pci_disable_msix(adapter->pdev);
362 adapter->flags &= ~USING_MSIX;
363 } else if (adapter->flags & USING_MSI) {
364 pci_disable_msi(adapter->pdev);
365 adapter->flags &= ~USING_MSI;
370 * Interrupt handler for asynchronous events used with MSI-X.
372 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
374 t3_slow_intr_handler(cookie);
379 * Name the MSI-X interrupts.
381 static void name_msix_vecs(struct adapter *adap)
383 int i, j, msi_idx = 1;
385 strscpy(adap->msix_info[0].desc, adap->name, sizeof(adap->msix_info[0].desc));
387 for_each_port(adap, j) {
388 struct net_device *d = adap->port[j];
389 const struct port_info *pi = netdev_priv(d);
391 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
392 snprintf(adap->msix_info[msi_idx].desc,
393 sizeof(adap->msix_info[0].desc),
394 "%s-%d", d->name, pi->first_qset + i);
399 static int request_msix_data_irqs(struct adapter *adap)
401 int i, j, err, qidx = 0;
403 for_each_port(adap, i) {
404 int nqsets = adap2pinfo(adap, i)->nqsets;
406 for (j = 0; j < nqsets; ++j) {
407 err = request_irq(adap->msix_info[qidx + 1].vec,
408 t3_intr_handler(adap,
411 adap->msix_info[qidx + 1].desc,
412 &adap->sge.qs[qidx]);
415 free_irq(adap->msix_info[qidx + 1].vec,
416 &adap->sge.qs[qidx]);
425 static void free_irq_resources(struct adapter *adapter)
427 if (adapter->flags & USING_MSIX) {
430 free_irq(adapter->msix_info[0].vec, adapter);
431 for_each_port(adapter, i)
432 n += adap2pinfo(adapter, i)->nqsets;
434 for (i = 0; i < n; ++i)
435 free_irq(adapter->msix_info[i + 1].vec,
436 &adapter->sge.qs[i]);
438 free_irq(adapter->pdev->irq, adapter);
441 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
446 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
454 static int init_tp_parity(struct adapter *adap)
458 struct cpl_set_tcb_field *greq;
459 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
461 t3_tp_set_offload_mode(adap, 1);
463 for (i = 0; i < 16; i++) {
464 struct cpl_smt_write_req *req;
466 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
468 skb = adap->nofail_skb;
472 req = __skb_put_zero(skb, sizeof(*req));
473 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
475 req->mtu_idx = NMTUS - 1;
477 t3_mgmt_tx(adap, skb);
478 if (skb == adap->nofail_skb) {
479 await_mgmt_replies(adap, cnt, i + 1);
480 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
481 if (!adap->nofail_skb)
486 for (i = 0; i < 2048; i++) {
487 struct cpl_l2t_write_req *req;
489 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
491 skb = adap->nofail_skb;
495 req = __skb_put_zero(skb, sizeof(*req));
496 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
497 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
498 req->params = htonl(V_L2T_W_IDX(i));
499 t3_mgmt_tx(adap, skb);
500 if (skb == adap->nofail_skb) {
501 await_mgmt_replies(adap, cnt, 16 + i + 1);
502 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
503 if (!adap->nofail_skb)
508 for (i = 0; i < 2048; i++) {
509 struct cpl_rte_write_req *req;
511 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
513 skb = adap->nofail_skb;
517 req = __skb_put_zero(skb, sizeof(*req));
518 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
519 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
520 req->l2t_idx = htonl(V_L2T_W_IDX(i));
521 t3_mgmt_tx(adap, skb);
522 if (skb == adap->nofail_skb) {
523 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
524 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
525 if (!adap->nofail_skb)
530 skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
532 skb = adap->nofail_skb;
536 greq = __skb_put_zero(skb, sizeof(*greq));
537 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
538 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
539 greq->mask = cpu_to_be64(1);
540 t3_mgmt_tx(adap, skb);
542 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
543 if (skb == adap->nofail_skb) {
544 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
545 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
548 t3_tp_set_offload_mode(adap, 0);
552 t3_tp_set_offload_mode(adap, 0);
557 * setup_rss - configure RSS
560 * Sets up RSS to distribute packets to multiple receive queues. We
561 * configure the RSS CPU lookup table to distribute to the number of HW
562 * receive queues, and the response queue lookup table to narrow that
563 * down to the response queues actually configured for each port.
564 * We always configure the RSS mapping for two ports since the mapping
565 * table has plenty of entries.
567 static void setup_rss(struct adapter *adap)
570 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
571 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
572 u8 cpus[SGE_QSETS + 1];
573 u16 rspq_map[RSS_TABLE_SIZE + 1];
575 for (i = 0; i < SGE_QSETS; ++i)
577 cpus[SGE_QSETS] = 0xff; /* terminator */
579 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
580 rspq_map[i] = i % nq0;
581 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
583 rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
585 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
586 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
587 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
590 static void ring_dbs(struct adapter *adap)
594 for (i = 0; i < SGE_QSETS; i++) {
595 struct sge_qset *qs = &adap->sge.qs[i];
598 for (j = 0; j < SGE_TXQ_PER_SET; j++)
599 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
603 static void init_napi(struct adapter *adap)
607 for (i = 0; i < SGE_QSETS; i++) {
608 struct sge_qset *qs = &adap->sge.qs[i];
611 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
615 * netif_napi_add() can be called only once per napi_struct because it
616 * adds each new napi_struct to a list. Be careful not to call it a
617 * second time, e.g., during EEH recovery, by making a note of it.
619 adap->flags |= NAPI_INIT;
623 * Wait until all NAPI handlers are descheduled. This includes the handlers of
624 * both netdevices representing interfaces and the dummy ones for the extra
627 static void quiesce_rx(struct adapter *adap)
631 for (i = 0; i < SGE_QSETS; i++)
632 if (adap->sge.qs[i].adap)
633 napi_disable(&adap->sge.qs[i].napi);
636 static void enable_all_napi(struct adapter *adap)
639 for (i = 0; i < SGE_QSETS; i++)
640 if (adap->sge.qs[i].adap)
641 napi_enable(&adap->sge.qs[i].napi);
645 * setup_sge_qsets - configure SGE Tx/Rx/response queues
648 * Determines how many sets of SGE queues to use and initializes them.
649 * We support multiple queue sets per port if we have MSI-X, otherwise
650 * just one queue set per port.
652 static int setup_sge_qsets(struct adapter *adap)
654 int i, j, err, irq_idx = 0, qset_idx = 0;
655 unsigned int ntxq = SGE_TXQ_PER_SET;
657 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
660 for_each_port(adap, i) {
661 struct net_device *dev = adap->port[i];
662 struct port_info *pi = netdev_priv(dev);
664 pi->qs = &adap->sge.qs[pi->first_qset];
665 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
666 err = t3_sge_alloc_qset(adap, qset_idx, 1,
667 (adap->flags & USING_MSIX) ? qset_idx + 1 :
669 &adap->params.sge.qset[qset_idx], ntxq, dev,
670 netdev_get_tx_queue(dev, j));
672 t3_free_sge_resources(adap);
681 static ssize_t attr_show(struct device *d, char *buf,
682 ssize_t(*format) (struct net_device *, char *))
686 /* Synchronize with ioctls that may shut down the device */
688 len = (*format) (to_net_dev(d), buf);
693 static ssize_t attr_store(struct device *d,
694 const char *buf, size_t len,
695 ssize_t(*set) (struct net_device *, unsigned int),
696 unsigned int min_val, unsigned int max_val)
701 if (!capable(CAP_NET_ADMIN))
704 ret = kstrtouint(buf, 0, &val);
707 if (val < min_val || val > max_val)
711 ret = (*set) (to_net_dev(d), val);
/* Generate format_<name>() and show_<name>() for a read-only sysfs attribute
 * whose value is given by @val_expr (evaluated with pi/adap in scope).
 */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
731 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
733 struct port_info *pi = netdev_priv(dev);
734 struct adapter *adap = pi->adapter;
735 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
737 if (adap->flags & FULL_INIT_DONE)
739 if (val && adap->params.rev == 0)
741 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
744 adap->params.mc5.nfilters = val;
748 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
749 const char *buf, size_t len)
751 return attr_store(d, buf, len, set_nfilters, 0, ~0);
754 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
756 struct port_info *pi = netdev_priv(dev);
757 struct adapter *adap = pi->adapter;
759 if (adap->flags & FULL_INIT_DONE)
761 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
764 adap->params.mc5.nservers = val;
768 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
769 const char *buf, size_t len)
771 return attr_store(d, buf, len, set_nservers, 0, ~0);
774 #define CXGB3_ATTR_R(name, val_expr) \
775 CXGB3_SHOW(name, val_expr) \
776 static DEVICE_ATTR(name, 0444, show_##name, NULL)
778 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
779 CXGB3_SHOW(name, val_expr) \
780 static DEVICE_ATTR(name, 0644, show_##name, store_method)
782 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
783 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
784 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
786 static struct attribute *cxgb3_attrs[] = {
787 &dev_attr_cam_size.attr,
788 &dev_attr_nfilters.attr,
789 &dev_attr_nservers.attr,
793 static const struct attribute_group cxgb3_attr_group = {
794 .attrs = cxgb3_attrs,
797 static ssize_t tm_attr_show(struct device *d,
798 char *buf, int sched)
800 struct port_info *pi = netdev_priv(to_net_dev(d));
801 struct adapter *adap = pi->adapter;
802 unsigned int v, addr, bpt, cpt;
805 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
807 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
808 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
811 bpt = (v >> 8) & 0xff;
814 len = sprintf(buf, "disabled\n");
816 v = (adap->params.vpd.cclk * 1000) / cpt;
817 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
823 static ssize_t tm_attr_store(struct device *d,
824 const char *buf, size_t len, int sched)
826 struct port_info *pi = netdev_priv(to_net_dev(d));
827 struct adapter *adap = pi->adapter;
831 if (!capable(CAP_NET_ADMIN))
834 ret = kstrtouint(buf, 0, &val);
841 ret = t3_config_sched(adap, val, sched);
848 #define TM_ATTR(name, sched) \
849 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
852 return tm_attr_show(d, buf, sched); \
854 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
855 const char *buf, size_t len) \
857 return tm_attr_store(d, buf, len, sched); \
859 static DEVICE_ATTR(name, 0644, show_##name, store_##name)
870 static struct attribute *offload_attrs[] = {
871 &dev_attr_sched0.attr,
872 &dev_attr_sched1.attr,
873 &dev_attr_sched2.attr,
874 &dev_attr_sched3.attr,
875 &dev_attr_sched4.attr,
876 &dev_attr_sched5.attr,
877 &dev_attr_sched6.attr,
878 &dev_attr_sched7.attr,
882 static const struct attribute_group offload_attr_group = {
883 .attrs = offload_attrs,
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
900 static int write_smt_entry(struct adapter *adapter, int idx)
902 struct cpl_smt_write_req *req;
903 struct port_info *pi = netdev_priv(adapter->port[idx]);
904 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
909 req = __skb_put(skb, sizeof(*req));
910 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
911 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
912 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
914 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
915 memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
917 offload_tx(&adapter->tdev, skb);
921 static int init_smt(struct adapter *adapter)
925 for_each_port(adapter, i)
926 write_smt_entry(adapter, i);
930 static void init_port_mtus(struct adapter *adapter)
932 unsigned int mtus = adapter->port[0]->mtu;
934 if (adapter->port[1])
935 mtus |= adapter->port[1]->mtu << 16;
936 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
939 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
943 struct mngt_pktsched_wr *req;
946 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
948 skb = adap->nofail_skb;
952 req = skb_put(skb, sizeof(*req));
953 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
954 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
960 ret = t3_mgmt_tx(adap, skb);
961 if (skb == adap->nofail_skb) {
962 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
964 if (!adap->nofail_skb)
971 static int bind_qsets(struct adapter *adap)
975 for_each_port(adap, i) {
976 const struct port_info *pi = adap2pinfo(adap, i);
978 for (j = 0; j < pi->nqsets; ++j) {
979 int ret = send_pktsched_cmd(adap, 1,
980 pi->first_qset + j, -1,
991 #define FW_FNAME "/*(DEBLOBBED)*/"
993 #define TPSRAM_NAME "/*(DEBLOBBED)*/"
994 #define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
995 #define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
996 #define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
999 static inline const char *get_edc_fw_name(int edc_idx)
1001 const char *fw_name = NULL;
1004 case EDC_OPT_AEL2005:
1005 fw_name = AEL2005_OPT_EDC_NAME;
1007 case EDC_TWX_AEL2005:
1008 fw_name = AEL2005_TWX_EDC_NAME;
1010 case EDC_TWX_AEL2020:
1011 fw_name = AEL2020_TWX_EDC_NAME;
1017 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1019 struct adapter *adapter = phy->adapter;
1020 const struct firmware *fw;
1021 const char *fw_name;
1024 u16 *cache = phy->phy_cache;
1025 int i, ret = -EINVAL;
1027 fw_name = get_edc_fw_name(edc_idx);
1029 ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
1031 dev_err(&adapter->pdev->dev,
1032 "could not upgrade firmware: unable to load %s\n",
1037 /* check size, take checksum in account */
1038 if (fw->size > size + 4) {
1039 CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1040 (unsigned int)fw->size, size + 4);
1044 /* compute checksum */
1045 p = (const __be32 *)fw->data;
1046 for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1047 csum += ntohl(p[i]);
1049 if (csum != 0xffffffff) {
1050 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1055 for (i = 0; i < size / 4 ; i++) {
1056 *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1057 *cache++ = be32_to_cpu(p[i]) & 0xffff;
1060 release_firmware(fw);
1065 static int upgrade_fw(struct adapter *adap)
1068 const struct firmware *fw;
1069 struct device *dev = &adap->pdev->dev;
1071 ret = reject_firmware(&fw, FW_FNAME, dev);
1073 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1077 ret = t3_load_fw(adap, fw->data, fw->size);
1078 release_firmware(fw);
1081 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1082 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1084 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1085 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1090 static inline char t3rev2char(struct adapter *adapter)
1094 switch(adapter->params.rev) {
1106 static int update_tpsram(struct adapter *adap)
1108 const struct firmware *tpsram;
1110 struct device *dev = &adap->pdev->dev;
1114 rev = t3rev2char(adap);
1118 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1120 ret = reject_firmware(&tpsram, buf, dev);
1122 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1127 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1129 goto release_tpsram;
1131 ret = t3_set_proto_sram(adap, tpsram->data);
1134 "successful update of protocol engine "
1136 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1138 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1139 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1141 dev_err(dev, "loading protocol SRAM failed\n");
1144 release_firmware(tpsram);
1150 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1151 * @adap: the adapter
1154 * Ensures that current Rx processing on any of the queues associated with
1155 * the given port completes before returning. We do this by acquiring and
1156 * releasing the locks of the response queues associated with the port.
1158 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1162 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1163 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1165 spin_lock_irq(&q->lock);
1166 spin_unlock_irq(&q->lock);
1170 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1172 struct port_info *pi = netdev_priv(dev);
1173 struct adapter *adapter = pi->adapter;
1175 if (adapter->params.rev > 0) {
1176 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1177 features & NETIF_F_HW_VLAN_CTAG_RX);
1179 /* single control for all ports */
1180 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1182 for_each_port(adapter, i)
1184 adapter->port[i]->features &
1185 NETIF_F_HW_VLAN_CTAG_RX;
1187 t3_set_vlan_accel(adapter, 1, have_vlans);
1189 t3_synchronize_rx(adapter, pi);
1193 * cxgb_up - enable the adapter
1194 * @adap: adapter being enabled
1196 * Called when the first port is enabled, this function performs the
1197 * actions necessary to make an adapter operational, such as completing
1198 * the initialization of HW modules, and enabling interrupts.
1200 * Must be called with the rtnl lock held.
1202 static int cxgb_up(struct adapter *adap)
1206 if (!(adap->flags & FULL_INIT_DONE)) {
1207 err = t3_check_fw_version(adap);
1208 if (err == -EINVAL) {
1209 err = upgrade_fw(adap);
1210 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1211 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1212 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1215 err = t3_check_tpsram_version(adap);
1216 if (err == -EINVAL) {
1217 err = update_tpsram(adap);
1218 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1219 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1220 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1224 * Clear interrupts now to catch errors if t3_init_hw fails.
1225 * We clear them again later as initialization may trigger
1226 * conditions that can interrupt.
1228 t3_intr_clear(adap);
1230 err = t3_init_hw(adap, 0);
1234 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1235 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1237 err = setup_sge_qsets(adap);
1241 for_each_port(adap, i)
1242 cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1245 if (!(adap->flags & NAPI_INIT))
1248 t3_start_sge_timers(adap);
1249 adap->flags |= FULL_INIT_DONE;
1252 t3_intr_clear(adap);
1254 if (adap->flags & USING_MSIX) {
1255 name_msix_vecs(adap);
1256 err = request_irq(adap->msix_info[0].vec,
1257 t3_async_intr_handler, 0,
1258 adap->msix_info[0].desc, adap);
1262 err = request_msix_data_irqs(adap);
1264 free_irq(adap->msix_info[0].vec, adap);
1268 err = request_irq(adap->pdev->irq,
1269 t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
1270 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
1276 enable_all_napi(adap);
1278 t3_intr_enable(adap);
1280 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1281 is_offload(adap) && init_tp_parity(adap) == 0)
1282 adap->flags |= TP_PARITY_INIT;
1284 if (adap->flags & TP_PARITY_INIT) {
1285 t3_write_reg(adap, A_TP_INT_CAUSE,
1286 F_CMCACHEPERR | F_ARPLUTPERR);
1287 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1290 if (!(adap->flags & QUEUES_BOUND)) {
1291 int ret = bind_qsets(adap);
1294 CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1295 t3_intr_disable(adap);
1297 free_irq_resources(adap);
1301 adap->flags |= QUEUES_BOUND;
1307 CH_ERR(adap, "request_irq failed, err %d\n", err);
1312 * Release resources when all the ports and offloading have been stopped.
1314 static void cxgb_down(struct adapter *adapter, int on_wq)
1316 t3_sge_stop(adapter);
1317 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1318 t3_intr_disable(adapter);
1319 spin_unlock_irq(&adapter->work_lock);
1321 free_irq_resources(adapter);
1322 quiesce_rx(adapter);
1323 t3_sge_stop(adapter);
1325 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1328 static void schedule_chk_task(struct adapter *adap)
1332 timeo = adap->params.linkpoll_period ?
1333 (HZ * adap->params.linkpoll_period) / 10 :
1334 adap->params.stats_update_period * HZ;
1336 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1339 static int offload_open(struct net_device *dev)
1341 struct port_info *pi = netdev_priv(dev);
1342 struct adapter *adapter = pi->adapter;
1343 struct t3cdev *tdev = dev2t3cdev(dev);
1344 int adap_up = adapter->open_device_map & PORT_MASK;
1347 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1350 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1353 t3_tp_set_offload_mode(adapter, 1);
1354 tdev->lldev = adapter->port[0];
1355 err = cxgb3_offload_activate(adapter);
1359 init_port_mtus(adapter);
1360 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1361 adapter->params.b_wnd,
1362 adapter->params.rev == 0 ?
1363 adapter->port[0]->mtu : 0xffff);
1366 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1367 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1369 /* Call back all registered clients */
1370 cxgb3_add_clients(tdev);
1373 /* restore them in case the offload module has changed them */
1375 t3_tp_set_offload_mode(adapter, 0);
1376 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1377 cxgb3_set_dummy_ops(tdev);
/*
 * Shut down the offload side: detach clients, remove sysfs attributes,
 * flush pending TID-release work, disable offload mode and bring the
 * adapter down if this was its last user.
 */
1382 static int offload_close(struct t3cdev *tdev)
1384 struct adapter *adapter = tdev2adap(tdev);
1385 struct t3c_data *td = T3C_DATA(tdev);
1387 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1390 /* Call back all registered clients */
1391 cxgb3_remove_clients(tdev);
1393 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1395 /* Flush work scheduled while releasing TIDs */
1396 flush_work(&td->tid_release_task);
1399 cxgb3_set_dummy_ops(tdev);
1400 t3_tp_set_offload_mode(adapter, 0);
1401 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
/* last user gone: take the whole adapter down (not on a workqueue) */
1403 if (!adapter->open_device_map)
1404 cxgb_down(adapter, 0);
1406 cxgb3_offload_deactivate(adapter);
/*
 * ndo_open handler: bring the adapter up on first open, mark the port in
 * open_device_map, optionally start offload, size the tx/rx queues and
 * enable port interrupts. NOTE(review): error-return lines are elided in
 * this excerpt; upstream guards schedule_chk_task() with !other_ports —
 * that guard line is not visible here.
 */
1410 static int cxgb_open(struct net_device *dev)
1412 struct port_info *pi = netdev_priv(dev);
1413 struct adapter *adapter = pi->adapter;
1414 int other_ports = adapter->open_device_map & PORT_MASK;
1417 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1420 set_bit(pi->port_id, &adapter->open_device_map);
1421 if (is_offload(adapter) && !ofld_disable) {
1422 err = offload_open(dev);
/* offload bring-up failure is non-fatal: plain NIC mode still works */
1424 pr_warn("Could not initialize offload capabilities\n");
1427 netif_set_real_num_tx_queues(dev, pi->nqsets);
1428 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1432 t3_port_intr_enable(adapter, pi->port_id);
1433 netif_tx_start_all_queues(dev);
1435 schedule_chk_task(adapter);
1437 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
/*
 * Common port-close path. Masks port and link-fault interrupts, stops the
 * tx queues, powers down the PHY and disables the MAC; if this was the
 * last open port it cancels the periodic check task and brings the
 * adapter down. @on_wq is forwarded to cxgb_down().
 */
1441 static int __cxgb_close(struct net_device *dev, int on_wq)
1443 struct port_info *pi = netdev_priv(dev);
1444 struct adapter *adapter = pi->adapter;
1447 if (!adapter->open_device_map)
1450 /* Stop link fault interrupts */
1451 t3_xgm_intr_disable(adapter, pi->port_id);
/* NOTE(review): status read presumably flushes/acks latched XGM events */
1452 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1454 t3_port_intr_disable(adapter, pi->port_id);
1455 netif_tx_stop_all_queues(dev);
1456 pi->phy.ops->power_down(&pi->phy, 1);
1457 netif_carrier_off(dev);
1458 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1460 spin_lock_irq(&adapter->work_lock);	/* sync with update task */
1461 clear_bit(pi->port_id, &adapter->open_device_map);
1462 spin_unlock_irq(&adapter->work_lock);
/* last NIC port closed: stop the periodic task before teardown */
1464 if (!(adapter->open_device_map & PORT_MASK))
1465 cancel_delayed_work_sync(&adapter->adap_check_task);
1467 if (!adapter->open_device_map)
1468 cxgb_down(adapter, on_wq);
1470 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
/* ndo_stop handler: close the port from process context (not a workqueue). */
static int cxgb_close(struct net_device *dev)
{
	const int on_wq = 0;	/* not running on the driver workqueue */

	return __cxgb_close(dev, on_wq);
}
/*
 * ndo_get_stats handler: fold the hardware MAC counters (refreshed under
 * stats_lock) into the generic net_device_stats fields.
 * NOTE(review): the trailing 'return ns;' is elided in this excerpt.
 */
1479 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1481 struct port_info *pi = netdev_priv(dev);
1482 struct adapter *adapter = pi->adapter;
1483 struct net_device_stats *ns = &dev->stats;
1484 const struct mac_stats *pstats;
1486 spin_lock(&adapter->stats_lock);
1487 pstats = t3_mac_update_stats(&pi->mac);
1488 spin_unlock(&adapter->stats_lock);
1490 ns->tx_bytes = pstats->tx_octets;
1491 ns->tx_packets = pstats->tx_frames;
1492 ns->rx_bytes = pstats->rx_octets;
1493 ns->rx_packets = pstats->rx_frames;
1494 ns->multicast = pstats->rx_mcast_frames;
1496 ns->tx_errors = pstats->tx_underrun;
1497 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1498 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1499 pstats->rx_fifo_ovfl;
1501 /* detailed rx_errors */
1502 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1503 ns->rx_over_errors = 0;
1504 ns->rx_crc_errors = pstats->rx_fcs_errs;
1505 ns->rx_frame_errors = pstats->rx_symbol_errs;
1506 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1507 ns->rx_missed_errors = pstats->rx_cong_drops;
1509 /* detailed tx_errors */
1510 ns->tx_aborted_errors = 0;
1511 ns->tx_carrier_errors = 0;
1512 ns->tx_fifo_errors = pstats->tx_underrun;
1513 ns->tx_heartbeat_errors = 0;
1514 ns->tx_window_errors = 0;
1518 static u32 get_msglevel(struct net_device *dev)
1520 struct port_info *pi = netdev_priv(dev);
1521 struct adapter *adapter = pi->adapter;
1523 return adapter->msg_enable;
1526 static void set_msglevel(struct net_device *dev, u32 val)
1528 struct port_info *pi = netdev_priv(dev);
1529 struct adapter *adapter = pi->adapter;
1531 adapter->msg_enable = val;
/*
 * Statistic names for ETH_SS_STATS, in exactly the order the values are
 * emitted by get_stats(). NOTE(review): many table entries are elided in
 * this excerpt; keep this table and get_stats() in sync when editing.
 */
1534 static const char stats_strings[][ETH_GSTRING_LEN] = {
1537 "TxMulticastFramesOK",
1538 "TxBroadcastFramesOK",
1545 "TxFrames128To255 ",
1546 "TxFrames256To511 ",
1547 "TxFrames512To1023 ",
1548 "TxFrames1024To1518 ",
1549 "TxFrames1519ToMax ",
1553 "RxMulticastFramesOK",
1554 "RxBroadcastFramesOK",
1565 "RxFrames128To255 ",
1566 "RxFrames256To511 ",
1567 "RxFrames512To1023 ",
1568 "RxFrames1024To1518 ",
1569 "RxFrames1519ToMax ",
1582 "CheckTXEnToggled ",
/*
 * ethtool get_sset_count: number of statistic strings for ETH_SS_STATS.
 * NOTE(review): the sset check and the not-supported branch are elided in
 * this excerpt.
 */
1588 static int get_sset_count(struct net_device *dev, int sset)
1592 return ARRAY_SIZE(stats_strings);
1598 #define T3_REGMAP_SIZE (3 * 1024)
/* ethtool get_regs_len: byte size of the dump produced by get_regs(). */
1600 static int get_regs_len(struct net_device *dev)
1602 return T3_REGMAP_SIZE;
/*
 * ethtool get_eeprom_len. NOTE(review): the body is elided in this
 * excerpt; presumably it returns the serial EEPROM size constant.
 */
1605 static int get_eeprom_len(struct net_device *dev)
/*
 * ethtool get_drvinfo: report driver name, PCI bus info and the firmware
 * and TP microcode versions. The version registers are read under
 * stats_lock. NOTE(review): declarations of fw_vers/tp_vers are elided in
 * this excerpt.
 */
1610 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1612 struct port_info *pi = netdev_priv(dev);
1613 struct adapter *adapter = pi->adapter;
1617 spin_lock(&adapter->stats_lock);
1618 t3_get_fw_version(adapter, &fw_vers);
1619 t3_get_tp_version(adapter, &tp_vers);
1620 spin_unlock(&adapter->stats_lock);
1622 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1623 strscpy(info->bus_info, pci_name(adapter->pdev),
1624 sizeof(info->bus_info));
1626 snprintf(info->fw_version, sizeof(info->fw_version),
1627 "%s %u.%u.%u TP %u.%u.%u",
1628 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1629 G_FW_VERSION_MAJOR(fw_vers),
1630 G_FW_VERSION_MINOR(fw_vers),
1631 G_FW_VERSION_MICRO(fw_vers),
1632 G_TP_VERSION_MAJOR(tp_vers),
1633 G_TP_VERSION_MINOR(tp_vers),
1634 G_TP_VERSION_MICRO(tp_vers));
1637 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1639 if (stringset == ETH_SS_STATS)
1640 memcpy(data, stats_strings, sizeof(stats_strings));
1643 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1644 struct port_info *p, int idx)
1647 unsigned long tot = 0;
1649 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1650 tot += adapter->sge.qs[i].port_stats[idx];
/*
 * ethtool get_ethtool_stats: emit the values matching stats_strings[] —
 * MAC tx counters, MAC rx counters, PHY fifo errors, per-port SGE
 * counters, then congestion/toggle/reset/link-fault counters. The order
 * here must match stats_strings exactly.
 */
1654 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1657 struct port_info *pi = netdev_priv(dev);
1658 struct adapter *adapter = pi->adapter;
1659 const struct mac_stats *s;
1661 spin_lock(&adapter->stats_lock);
1662 s = t3_mac_update_stats(&pi->mac);
1663 spin_unlock(&adapter->stats_lock);
1665 *data++ = s->tx_octets;
1666 *data++ = s->tx_frames;
1667 *data++ = s->tx_mcast_frames;
1668 *data++ = s->tx_bcast_frames;
1669 *data++ = s->tx_pause;
1670 *data++ = s->tx_underrun;
1671 *data++ = s->tx_fifo_urun;
1673 *data++ = s->tx_frames_64;
1674 *data++ = s->tx_frames_65_127;
1675 *data++ = s->tx_frames_128_255;
1676 *data++ = s->tx_frames_256_511;
1677 *data++ = s->tx_frames_512_1023;
1678 *data++ = s->tx_frames_1024_1518;
1679 *data++ = s->tx_frames_1519_max;
1681 *data++ = s->rx_octets;
1682 *data++ = s->rx_frames;
1683 *data++ = s->rx_mcast_frames;
1684 *data++ = s->rx_bcast_frames;
1685 *data++ = s->rx_pause;
1686 *data++ = s->rx_fcs_errs;
1687 *data++ = s->rx_symbol_errs;
1688 *data++ = s->rx_short;
1689 *data++ = s->rx_jabber;
1690 *data++ = s->rx_too_long;
1691 *data++ = s->rx_fifo_ovfl;
1693 *data++ = s->rx_frames_64;
1694 *data++ = s->rx_frames_65_127;
1695 *data++ = s->rx_frames_128_255;
1696 *data++ = s->rx_frames_256_511;
1697 *data++ = s->rx_frames_512_1023;
1698 *data++ = s->rx_frames_1024_1518;
1699 *data++ = s->rx_frames_1519_max;
1701 *data++ = pi->phy.fifo_errors;
1703 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1704 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1705 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1706 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1707 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1711 *data++ = s->rx_cong_drops;
1713 *data++ = s->num_toggled;
1714 *data++ = s->num_resets;
1716 *data++ = s->link_faults;
1719 static inline void reg_block_dump(struct adapter *ap, void *buf,
1720 unsigned int start, unsigned int end)
1722 u32 *p = buf + start;
1724 for (; start <= end; start += sizeof(u32))
1725 *p++ = t3_read_reg(ap, start);
/*
 * ethtool get_regs: dump selected chip register blocks into @buf at their
 * natural offsets (unmapped ranges stay zero from the memset).
 * NOTE(review): the buffer pointer setup line is elided in this excerpt.
 */
1728 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1731 struct port_info *pi = netdev_priv(dev);
1732 struct adapter *ap = pi->adapter;
1736 * bits 0..9: chip version
1737 * bits 10..15: chip revision
1738 * bit 31: set for PCIe cards
1740 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1743 * We skip the MAC statistics registers because they are clear-on-read.
1744 * Also reading multi-register stats would need to synchronize with the
1745 * periodic mac stats accumulation. Hard to justify the complexity.
1747 memset(buf, 0, T3_REGMAP_SIZE);
1748 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1749 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1750 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1751 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1752 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1753 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1754 XGM_REG(A_XGM_SERDES_STAT3, 1));
1755 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1756 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1759 static int restart_autoneg(struct net_device *dev)
1761 struct port_info *p = netdev_priv(dev);
1763 if (!netif_running(dev))
1765 if (p->link_config.autoneg != AUTONEG_ENABLE)
1767 p->phy.ops->autoneg_restart(&p->phy);
/*
 * ethtool set_phys_id: identify the port by driving GPIO0 (the port LED).
 * Returning 1 from ETHTOOL_ID_ACTIVE asks ethtool to cycle on/off once
 * per second. NOTE(review): the ETHTOOL_ID_ON case, the restored GPIO
 * value and the final returns are elided in this excerpt.
 */
1771 static int set_phys_id(struct net_device *dev,
1772 enum ethtool_phys_id_state state)
1774 struct port_info *pi = netdev_priv(dev);
1775 struct adapter *adapter = pi->adapter;
1778 case ETHTOOL_ID_ACTIVE:
1779 return 1;	/* cycle on/off once per second */
1781 case ETHTOOL_ID_OFF:
1782 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1786 case ETHTOOL_ID_INACTIVE:
1787 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/*
 * ethtool get_link_ksettings: report supported/advertised modes from
 * link_config, the current speed/duplex only while the carrier is up,
 * and derive the port type (TP vs fibre) from the supported mask.
 * NOTE(review): the declaration of 'supported' and the return are elided
 * in this excerpt.
 */
1794 static int get_link_ksettings(struct net_device *dev,
1795 struct ethtool_link_ksettings *cmd)
1797 struct port_info *p = netdev_priv(dev);
1800 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1801 p->link_config.supported);
1802 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1803 p->link_config.advertising);
1805 if (netif_carrier_ok(dev)) {
1806 cmd->base.speed = p->link_config.speed;
1807 cmd->base.duplex = p->link_config.duplex;
1809 cmd->base.speed = SPEED_UNKNOWN;
1810 cmd->base.duplex = DUPLEX_UNKNOWN;
1813 ethtool_convert_link_mode_to_legacy_u32(&supported,
1814 cmd->link_modes.supported);
1816 cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1817 cmd->base.phy_address = p->phy.mdio.prtad;
1818 cmd->base.autoneg = p->link_config.autoneg;
1822 static int speed_duplex_to_caps(int speed, int duplex)
1828 if (duplex == DUPLEX_FULL)
1829 cap = SUPPORTED_10baseT_Full;
1831 cap = SUPPORTED_10baseT_Half;
1834 if (duplex == DUPLEX_FULL)
1835 cap = SUPPORTED_100baseT_Full;
1837 cap = SUPPORTED_100baseT_Half;
1840 if (duplex == DUPLEX_FULL)
1841 cap = SUPPORTED_1000baseT_Full;
1843 cap = SUPPORTED_1000baseT_Half;
1846 if (duplex == DUPLEX_FULL)
1847 cap = SUPPORTED_10000baseT_Full;
1852 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1853 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1854 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1855 ADVERTISED_10000baseT_Full)
/*
 * ethtool set_link_ksettings: validate the requested speed/duplex or
 * advertising mask against the PHY capabilities and restart the link if
 * the interface is running. Forcing 1Gb/s is rejected (autonegotiation is
 * mandatory at that speed). NOTE(review): several error-return and brace
 * lines are elided in this excerpt.
 */
1857 static int set_link_ksettings(struct net_device *dev,
1858 const struct ethtool_link_ksettings *cmd)
1860 struct port_info *p = netdev_priv(dev);
1861 struct link_config *lc = &p->link_config;
1864 ethtool_convert_link_mode_to_legacy_u32(&advertising,
1865 cmd->link_modes.advertising);
1867 if (!(lc->supported & SUPPORTED_Autoneg)) {
1869 * PHY offers a single speed/duplex. See if that's what's
1872 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1873 u32 speed = cmd->base.speed;
1874 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1875 if (lc->supported & cap)
1881 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1882 u32 speed = cmd->base.speed;
1883 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
/* forced mode: must be supported, and 1Gb/s cannot be forced */
1885 if (!(lc->supported & cap) || (speed == SPEED_1000))
1887 lc->requested_speed = speed;
1888 lc->requested_duplex = cmd->base.duplex;
1889 lc->advertising = 0;
/* autoneg: restrict advertising to modes the hardware supports */
1891 advertising &= ADVERTISED_MASK;
1892 advertising &= lc->supported;
1895 lc->requested_speed = SPEED_INVALID;
1896 lc->requested_duplex = DUPLEX_INVALID;
1897 lc->advertising = advertising | ADVERTISED_Autoneg;
1899 lc->autoneg = cmd->base.autoneg;
1900 if (netif_running(dev))
1901 t3_link_start(&p->phy, &p->mac, lc);
1905 static void get_pauseparam(struct net_device *dev,
1906 struct ethtool_pauseparam *epause)
1908 struct port_info *p = netdev_priv(dev);
1910 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1911 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1912 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool set_pauseparam: translate the requested pause settings into
 * link_config flags and apply them — via link restart when autoneg is on,
 * otherwise by programming the MAC flow control directly.
 * NOTE(review): guard/return lines are elided in this excerpt.
 */
1915 static int set_pauseparam(struct net_device *dev,
1916 struct ethtool_pauseparam *epause)
1918 struct port_info *p = netdev_priv(dev);
1919 struct link_config *lc = &p->link_config;
1921 if (epause->autoneg == AUTONEG_DISABLE)
1922 lc->requested_fc = 0;
1923 else if (lc->supported & SUPPORTED_Autoneg)
1924 lc->requested_fc = PAUSE_AUTONEG;
1928 if (epause->rx_pause)
1929 lc->requested_fc |= PAUSE_RX;
1930 if (epause->tx_pause)
1931 lc->requested_fc |= PAUSE_TX;
1932 if (lc->autoneg == AUTONEG_ENABLE) {
1933 if (netif_running(dev))
1934 t3_link_start(&p->phy, &p->mac, lc);
/* forced mode: apply rx/tx pause to the MAC immediately */
1936 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1937 if (netif_running(dev))
1938 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1943 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1944 struct kernel_ethtool_ringparam *kernel_e,
1945 struct netlink_ext_ack *extack)
1947 struct port_info *pi = netdev_priv(dev);
1948 struct adapter *adapter = pi->adapter;
1949 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1951 e->rx_max_pending = MAX_RX_BUFFERS;
1952 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1953 e->tx_max_pending = MAX_TXQ_ENTRIES;
1955 e->rx_pending = q->fl_size;
1956 e->rx_mini_pending = q->rspq_size;
1957 e->rx_jumbo_pending = q->jumbo_size;
1958 e->tx_pending = q->txq_size[0];
/*
 * ethtool set_ringparam: validate the requested ring sizes against the
 * hardware limits and store them for every qset of this port. Rejected
 * once FULL_INIT_DONE since rings cannot be resized after initialization.
 * NOTE(review): error-return lines and the loop variable declaration are
 * elided in this excerpt.
 */
1961 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1962 struct kernel_ethtool_ringparam *kernel_e,
1963 struct netlink_ext_ack *extack)
1965 struct port_info *pi = netdev_priv(dev);
1966 struct adapter *adapter = pi->adapter;
1967 struct qset_params *q;
1970 if (e->rx_pending > MAX_RX_BUFFERS ||
1971 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1972 e->tx_pending > MAX_TXQ_ENTRIES ||
1973 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1974 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1975 e->rx_pending < MIN_FL_ENTRIES ||
1976 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1977 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1980 if (adapter->flags & FULL_INIT_DONE)
/* apply the same sizes to all qsets of this port */
1983 q = &adapter->params.sge.qset[pi->first_qset];
1984 for (i = 0; i < pi->nqsets; ++i, ++q) {
1985 q->rspq_size = e->rx_mini_pending;
1986 q->fl_size = e->rx_pending;
1987 q->jumbo_size = e->rx_jumbo_pending;
1988 q->txq_size[0] = e->tx_pending;
1989 q->txq_size[1] = e->tx_pending;
1990 q->txq_size[2] = e->tx_pending;
1995 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
1996 struct kernel_ethtool_coalesce *kernel_coal,
1997 struct netlink_ext_ack *extack)
1999 struct port_info *pi = netdev_priv(dev);
2000 struct adapter *adapter = pi->adapter;
2001 struct qset_params *qsp;
2002 struct sge_qset *qs;
2005 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2008 for (i = 0; i < pi->nqsets; i++) {
2009 qsp = &adapter->params.sge.qset[i];
2010 qs = &adapter->sge.qs[i];
2011 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2012 t3_update_qset_coalesce(qs, qsp);
2018 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2019 struct kernel_ethtool_coalesce *kernel_coal,
2020 struct netlink_ext_ack *extack)
2022 struct port_info *pi = netdev_priv(dev);
2023 struct adapter *adapter = pi->adapter;
2024 struct qset_params *q = adapter->params.sge.qset;
2026 c->rx_coalesce_usecs = q->coalesce_usecs;
/*
 * ethtool get_eeprom: read from the serial EEPROM through the PCI VPD
 * capability. NOTE(review): the remaining parameters, local declarations
 * and the error/length handling after pci_read_vpd() are elided in this
 * excerpt.
 */
2030 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2033 struct port_info *pi = netdev_priv(dev);
2034 struct adapter *adapter = pi->adapter;
2037 e->magic = EEPROM_MAGIC;
2038 cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
/*
 * ethtool set_eeprom: write the serial EEPROM via PCI VPD. Writes happen
 * in 4-byte aligned units, so an unaligned first/last word is handled by
 * a read-modify-write through a bounce buffer; write protection is lifted
 * for the update and re-enabled afterwards.
 * NOTE(review): error-return, buffer-assignment and kfree lines are
 * elided in this excerpt.
 */
2047 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2050 struct port_info *pi = netdev_priv(dev);
2051 struct adapter *adapter = pi->adapter;
2052 u32 aligned_offset, aligned_len;
2056 if (eeprom->magic != EEPROM_MAGIC)
2059 aligned_offset = eeprom->offset & ~3;
2060 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2062 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2063 buf = kmalloc(aligned_len, GFP_KERNEL);
2066 err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
2070 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2074 err = t3_seeprom_wp(adapter, 0);
2078 err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
2080 err = t3_seeprom_wp(adapter, 1);
2084 return err < 0 ? err : 0;
2087 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2091 memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * ethtool entry points for this driver. Only rx_coalesce_usecs is a
 * supported coalescing parameter (enforced by the ethtool core through
 * supported_coalesce_params). NOTE(review): some initializer lines are
 * elided in this excerpt.
 */
2094 static const struct ethtool_ops cxgb_ethtool_ops = {
2095 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2096 .get_drvinfo = get_drvinfo,
2097 .get_msglevel = get_msglevel,
2098 .set_msglevel = set_msglevel,
2099 .get_ringparam = get_sge_param,
2100 .set_ringparam = set_sge_param,
2101 .get_coalesce = get_coalesce,
2102 .set_coalesce = set_coalesce,
2103 .get_eeprom_len = get_eeprom_len,
2104 .get_eeprom = get_eeprom,
2105 .set_eeprom = set_eeprom,
2106 .get_pauseparam = get_pauseparam,
2107 .set_pauseparam = set_pauseparam,
2108 .get_link = ethtool_op_get_link,
2109 .get_strings = get_strings,
2110 .set_phys_id = set_phys_id,
2111 .nway_reset = restart_autoneg,
2112 .get_sset_count = get_sset_count,
2113 .get_ethtool_stats = get_stats,
2114 .get_regs_len = get_regs_len,
2115 .get_regs = get_regs,
2117 .get_link_ksettings = get_link_ksettings,
2118 .set_link_ksettings = set_link_ksettings,
/*
 * Range check used by the private ioctls: a negative value is the
 * "leave unchanged" sentinel and is always accepted; otherwise the value
 * must lie within [lo, hi]. Returns 1 when acceptable, 0 otherwise.
 */
static int cxgb_in_range(int val, int lo, int hi)
{
	if (val < 0)		/* sentinel: parameter not being set */
		return 1;
	return lo <= val && val <= hi;
}
/*
 * Private SIOCCHIOCTL ioctl multiplexer for cxgb3-specific configuration.
 * Each case checks capability, copies and validates the user argument,
 * then applies or reports the setting. Negative fields in the user
 * structures mean "leave unchanged" (see cxgb_in_range()).
 * NOTE(review): many error-return and brace lines are elided in this
 * excerpt; read against the full source before editing.
 */
2126 static int cxgb_siocdevprivate(struct net_device *dev,
2127 struct ifreq *ifreq,
2128 void __user *useraddr,
2131 struct port_info *pi = netdev_priv(dev);
2132 struct adapter *adapter = pi->adapter;
2135 if (cmd != SIOCCHIOCTL)
2138 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* configure one queue set's sizes, coalescing and polling mode */
2142 case CHELSIO_SET_QSET_PARAMS:{
2144 struct qset_params *q;
2145 struct ch_qset_params t;
2146 int q1 = pi->first_qset;
2147 int nqsets = pi->nqsets;
2149 if (!capable(CAP_NET_ADMIN))
2151 if (copy_from_user(&t, useraddr, sizeof(t)))
2153 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2155 if (t.qset_idx >= SGE_QSETS)
2157 if (!cxgb_in_range(t.intr_lat, 0, M_NEWTIMER) ||
2158 !cxgb_in_range(t.cong_thres, 0, 255) ||
2159 !cxgb_in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2161 !cxgb_in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2163 !cxgb_in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2164 MAX_CTRL_TXQ_ENTRIES) ||
2165 !cxgb_in_range(t.fl_size[0], MIN_FL_ENTRIES,
2167 !cxgb_in_range(t.fl_size[1], MIN_FL_ENTRIES,
2168 MAX_RX_JUMBO_BUFFERS) ||
2169 !cxgb_in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* ring sizes cannot change once the adapter is fully initialized */
2173 if ((adapter->flags & FULL_INIT_DONE) &&
2174 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2175 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2176 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2177 t.polling >= 0 || t.cong_thres >= 0))
2180 /* Allow setting of any available qset when offload enabled */
2181 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2183 for_each_port(adapter, i) {
2184 pi = adap2pinfo(adapter, i);
2185 nqsets += pi->first_qset + pi->nqsets;
2189 if (t.qset_idx < q1)
2191 if (t.qset_idx > q1 + nqsets - 1)
2194 q = &adapter->params.sge.qset[t.qset_idx];
/* negative fields mean "keep the current value" */
2196 if (t.rspq_size >= 0)
2197 q->rspq_size = t.rspq_size;
2198 if (t.fl_size[0] >= 0)
2199 q->fl_size = t.fl_size[0];
2200 if (t.fl_size[1] >= 0)
2201 q->jumbo_size = t.fl_size[1];
2202 if (t.txq_size[0] >= 0)
2203 q->txq_size[0] = t.txq_size[0];
2204 if (t.txq_size[1] >= 0)
2205 q->txq_size[1] = t.txq_size[1];
2206 if (t.txq_size[2] >= 0)
2207 q->txq_size[2] = t.txq_size[2];
2208 if (t.cong_thres >= 0)
2209 q->cong_thres = t.cong_thres;
2210 if (t.intr_lat >= 0) {
2211 struct sge_qset *qs =
2212 &adapter->sge.qs[t.qset_idx];
2214 q->coalesce_usecs = t.intr_lat;
2215 t3_update_qset_coalesce(qs, q);
2217 if (t.polling >= 0) {
2218 if (adapter->flags & USING_MSIX)
2219 q->polling = t.polling;
2221 /* No polling with INTx for T3A */
2222 if (adapter->params.rev == 0 &&
2223 !(adapter->flags & USING_MSI))
/* with INTx the polling mode must be uniform across all qsets */
2226 for (i = 0; i < SGE_QSETS; i++) {
2227 q = &adapter->params.sge.
2229 q->polling = t.polling;
/* the lro knob maps onto the netdev GRO feature flag */
2236 dev->wanted_features |= NETIF_F_GRO;
2238 dev->wanted_features &= ~NETIF_F_GRO;
2239 netdev_update_features(dev);
/* report one queue set's configuration back to user space */
2244 case CHELSIO_GET_QSET_PARAMS:{
2245 struct qset_params *q;
2246 struct ch_qset_params t;
2247 int q1 = pi->first_qset;
2248 int nqsets = pi->nqsets;
2251 if (copy_from_user(&t, useraddr, sizeof(t)))
2254 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2257 /* Display qsets for all ports when offload enabled */
2258 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2260 for_each_port(adapter, i) {
2261 pi = adap2pinfo(adapter, i);
2262 nqsets = pi->first_qset + pi->nqsets;
2266 if (t.qset_idx >= nqsets)
/* clamp the user index against Spectre-style speculation */
2268 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2270 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2271 t.rspq_size = q->rspq_size;
2272 t.txq_size[0] = q->txq_size[0];
2273 t.txq_size[1] = q->txq_size[1];
2274 t.txq_size[2] = q->txq_size[2];
2275 t.fl_size[0] = q->fl_size;
2276 t.fl_size[1] = q->jumbo_size;
2277 t.polling = q->polling;
2278 t.lro = !!(dev->features & NETIF_F_GRO);
2279 t.intr_lat = q->coalesce_usecs;
2280 t.cong_thres = q->cong_thres;
2283 if (adapter->flags & USING_MSIX)
2284 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2286 t.vector = adapter->pdev->irq;
2288 if (copy_to_user(useraddr, &t, sizeof(t)))
/* change how many queue sets this port owns (before full init only) */
2292 case CHELSIO_SET_QSET_NUM:{
2293 struct ch_reg edata;
2294 unsigned int i, first_qset = 0, other_qsets = 0;
2296 if (!capable(CAP_NET_ADMIN))
2298 if (adapter->flags & FULL_INIT_DONE)
2300 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2302 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2304 if (edata.val < 1 ||
2305 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2308 for_each_port(adapter, i)
2309 if (adapter->port[i] && adapter->port[i] != dev)
2310 other_qsets += adap2pinfo(adapter, i)->nqsets;
2312 if (edata.val + other_qsets > SGE_QSETS)
2315 pi->nqsets = edata.val;
/* re-pack first_qset assignments contiguously across ports */
2317 for_each_port(adapter, i)
2318 if (adapter->port[i]) {
2319 pi = adap2pinfo(adapter, i);
2320 pi->first_qset = first_qset;
2321 first_qset += pi->nqsets;
2325 case CHELSIO_GET_QSET_NUM:{
2326 struct ch_reg edata;
2328 memset(&edata, 0, sizeof(struct ch_reg));
2330 edata.cmd = CHELSIO_GET_QSET_NUM;
2331 edata.val = pi->nqsets;
2332 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* load a firmware image supplied inline after the header */
2336 case CHELSIO_LOAD_FW:{
2338 struct ch_mem_range t;
2340 if (!capable(CAP_SYS_RAWIO))
2342 if (copy_from_user(&t, useraddr, sizeof(t)))
2344 if (t.cmd != CHELSIO_LOAD_FW)
2346 /* Check t.len sanity ? */
2347 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2348 if (IS_ERR(fw_data))
2349 return PTR_ERR(fw_data);
2351 ret = t3_load_fw(adapter, fw_data, t.len);
/* replace the hardware MTU table (offload must not be running) */
2357 case CHELSIO_SETMTUTAB:{
2361 if (!is_offload(adapter))
2363 if (!capable(CAP_NET_ADMIN))
2365 if (offload_running(adapter))
2367 if (copy_from_user(&m, useraddr, sizeof(m)))
2369 if (m.cmd != CHELSIO_SETMTUTAB)
2371 if (m.nmtus != NMTUS)
2373 if (m.mtus[0] < 81)	/* accommodate SACK */
2376 /* MTUs must be in ascending order */
2377 for (i = 1; i < NMTUS; ++i)
2378 if (m.mtus[i] < m.mtus[i - 1])
2381 memcpy(adapter->params.mtus, m.mtus,
2382 sizeof(adapter->params.mtus));
/* report the payload-memory (PM) configuration */
2385 case CHELSIO_GET_PM:{
2386 struct tp_params *p = &adapter->params.tp;
2387 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2389 if (!is_offload(adapter))
2391 m.tx_pg_sz = p->tx_pg_size;
2392 m.tx_num_pg = p->tx_num_pgs;
2393 m.rx_pg_sz = p->rx_pg_size;
2394 m.rx_num_pg = p->rx_num_pgs;
2395 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2396 if (copy_to_user(useraddr, &m, sizeof(m)))
/* change the payload-memory page layout (before full init only) */
2400 case CHELSIO_SET_PM:{
2402 struct tp_params *p = &adapter->params.tp;
2404 if (!is_offload(adapter))
2406 if (!capable(CAP_NET_ADMIN))
2408 if (adapter->flags & FULL_INIT_DONE)
2410 if (copy_from_user(&m, useraddr, sizeof(m)))
2412 if (m.cmd != CHELSIO_SET_PM)
2414 if (!is_power_of_2(m.rx_pg_sz) ||
2415 !is_power_of_2(m.tx_pg_sz))
2416 return -EINVAL;	/* not power of 2 */
2417 if (!(m.rx_pg_sz & 0x14000))
2418 return -EINVAL;	/* not 16KB or 64KB */
2419 if (!(m.tx_pg_sz & 0x1554000))
2421 if (m.tx_num_pg == -1)
2422 m.tx_num_pg = p->tx_num_pgs;
2423 if (m.rx_num_pg == -1)
2424 m.rx_num_pg = p->rx_num_pgs;
2425 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2427 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2428 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2430 p->rx_pg_size = m.rx_pg_sz;
2431 p->tx_pg_size = m.tx_pg_sz;
2432 p->rx_num_pgs = m.rx_num_pg;
2433 p->tx_num_pgs = m.tx_num_pg;
/* stream adapter memory (CM/PMRX/PMTX) back to user space in chunks */
2436 case CHELSIO_GET_MEM:{
2437 struct ch_mem_range t;
2441 if (!is_offload(adapter))
2443 if (!capable(CAP_NET_ADMIN))
2445 if (!(adapter->flags & FULL_INIT_DONE))
2446 return -EIO;	/* need the memory controllers */
2447 if (copy_from_user(&t, useraddr, sizeof(t)))
2449 if (t.cmd != CHELSIO_GET_MEM)
2451 if ((t.addr & 7) || (t.len & 7))
2453 if (t.mem_id == MEM_CM)
2455 else if (t.mem_id == MEM_PMRX)
2456 mem = &adapter->pmrx;
2457 else if (t.mem_id == MEM_PMTX)
2458 mem = &adapter->pmtx;
2464 * bits 0..9: chip version
2465 * bits 10..15: chip revision
2467 t.version = 3 | (adapter->params.rev << 10);
2468 if (copy_to_user(useraddr, &t, sizeof(t)))
2472 * Read 256 bytes at a time as len can be large and we don't
2473 * want to use huge intermediate buffers.
2475 useraddr += sizeof(t);	/* advance to start of buffer */
2477 unsigned int chunk =
2478 min_t(unsigned int, t.len, sizeof(buf));
2481 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2485 if (copy_to_user(useraddr, buf, chunk))
/* program the two hardware packet trace filters */
2493 case CHELSIO_SET_TRACE_FILTER:{
2495 const struct trace_params *tp;
2497 if (!capable(CAP_NET_ADMIN))
2499 if (!offload_running(adapter))
2501 if (copy_from_user(&t, useraddr, sizeof(t)))
2503 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2506 tp = (const struct trace_params *)&t.sip;
2508 t3_config_trace_filter(adapter, tp, 0,
2512 t3_config_trace_filter(adapter, tp, 1,
/*
 * ndo_eth_ioctl handler for MII/MDIO ioctls. For 10G PHYs, a phy_id in
 * the legacy PRTAD/DEVAD packed format is converted to clause-45 form
 * before delegating to mdio_mii_ioctl().
 * NOTE(review): the switch skeleton and default branch are elided in this
 * excerpt.
 */
2523 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2525 struct mii_ioctl_data *data = if_mii(req);
2526 struct port_info *pi = netdev_priv(dev);
2527 struct adapter *adapter = pi->adapter;
2532 /* Convert phy_id from older PRTAD/DEVAD format */
2533 if (is_10G(adapter) &&
2534 !mdio_phy_id_is_c45(data->phy_id) &&
2535 (data->phy_id & 0x1f00) &&
2536 !(data->phy_id & 0xe0e0))
2537 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2538 data->phy_id & 0x1f);
2541 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
/*
 * ndo_change_mtu handler: program the MAC for the new MTU, recompute the
 * per-port MTU table and, on rev-0 parts with offload running, reload the
 * hardware MTU table. NOTE(review): the line storing new_mtu into the
 * netdev and the final return are elided in this excerpt.
 */
2547 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2549 struct port_info *pi = netdev_priv(dev);
2550 struct adapter *adapter = pi->adapter;
2553 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2556 init_port_mtus(adapter);
2557 if (adapter->params.rev == 0 && offload_running(adapter))
2558 t3_load_mtus(adapter, adapter->params.mtus,
2559 adapter->params.a_wnd, adapter->params.b_wnd,
2560 adapter->port[0]->mtu);
2564 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2566 struct port_info *pi = netdev_priv(dev);
2567 struct adapter *adapter = pi->adapter;
2568 struct sockaddr *addr = p;
2570 if (!is_valid_ether_addr(addr->sa_data))
2571 return -EADDRNOTAVAIL;
2573 eth_hw_addr_set(dev, addr->sa_data);
2574 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2575 if (offload_running(adapter))
2576 write_smt_entry(adapter, pi->port_id);
2580 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2581 netdev_features_t features)
2584 * Since there is no support for separate rx/tx vlan accel
2585 * enable/disable make sure tx flag is always in same state as rx.
2587 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2588 features |= NETIF_F_HW_VLAN_CTAG_TX;
2590 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2595 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2597 netdev_features_t changed = dev->features ^ features;
2599 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2600 cxgb_vlan_mode(dev, features);
2605 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: service every queue set of this port by invoking the
 * adapter's interrupt handler directly (used by netconsole and friends).
 * NOTE(review): the 'qidx'/'source' declarations and the MSI-X vs INTx
 * source-selection branch are elided in this excerpt.
 */
2606 static void cxgb_netpoll(struct net_device *dev)
2608 struct port_info *pi = netdev_priv(dev);
2609 struct adapter *adapter = pi->adapter;
2612 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2613 struct sge_qset *qs = &adapter->sge.qs[qidx];
2616 if (adapter->flags & USING_MSIX)
2621 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2627 * Periodic accumulation of MAC statistics.
2629 static void mac_stats_update(struct adapter *adapter)
2633 for_each_port(adapter, i) {
2634 struct net_device *dev = adapter->port[i];
2635 struct port_info *p = netdev_priv(dev);
2637 if (netif_running(dev)) {
2638 spin_lock(&adapter->stats_lock);
2639 t3_mac_update_stats(&p->mac);
2640 spin_unlock(&adapter->stats_lock);
/*
 * Poll link state for every port: handle a latched link fault first
 * (sampled under work_lock), then, for PHYs without interrupt support,
 * re-check the link with XGMAC interrupts masked around the check.
 * NOTE(review): loop-variable declarations and some branch lines are
 * elided in this excerpt.
 */
2645 static void check_link_status(struct adapter *adapter)
2649 for_each_port(adapter, i) {
2650 struct net_device *dev = adapter->port[i];
2651 struct port_info *p = netdev_priv(dev);
2654 spin_lock_irq(&adapter->work_lock);
2655 link_fault = p->link_fault;
2656 spin_unlock_irq(&adapter->work_lock);
2659 t3_link_fault(adapter, i);
2663 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2664 t3_xgm_intr_disable(adapter, i);
/* NOTE(review): status read presumably flushes pending XGM events */
2665 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2667 t3_link_changed(adapter, i);
2668 t3_xgm_intr_enable(adapter, i);
/*
 * T3B2 MAC watchdog, run from the periodic task. Takes RTNL with
 * rtnl_trylock() so it can never deadlock against an in-progress ifdown.
 * NOTE(review): based on the handling below, a watchdog status of 1 means
 * the MAC was toggled and 2 means it needed a full reset/reprogram —
 * confirm against t3b2_mac_watchdog_task().
 */
2673 static void check_t3b2_mac(struct adapter *adapter)
2677 if (!rtnl_trylock())	/* synchronize with ifdown */
2680 for_each_port(adapter, i) {
2681 struct net_device *dev = adapter->port[i];
2682 struct port_info *p = netdev_priv(dev);
2685 if (!netif_running(dev))
2689 if (netif_running(dev) && netif_carrier_ok(dev))
2690 status = t3b2_mac_watchdog_task(&p->mac);
2692 p->mac.stats.num_toggled++;
2693 else if (status == 2) {
2694 struct cmac *mac = &p->mac;
/* full MAC reprogram: MTU, address, rx mode, link and interrupts */
2696 t3_mac_set_mtu(mac, dev->mtu);
2697 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2698 cxgb_set_rxmode(dev);
2699 t3_link_start(&p->phy, mac, &p->link_config);
2700 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2701 t3_port_intr_enable(adapter, p->port_id);
2702 p->mac.stats.num_resets++;
/*
 * Periodic adapter maintenance worker: polls link state, accumulates MAC
 * statistics when due, runs the T3B2 MAC watchdog, counts and clears
 * polled hardware conditions (XGMAC rx-FIFO overflow, SGE free-list
 * empty) that would otherwise flood the system with interrupts, then
 * re-arms itself while any port remains open.
 * NOTE(review): several declaration/brace lines are elided here.
 */
2709 static void t3_adap_check_task(struct work_struct *work)
2711 struct adapter *adapter = container_of(work, struct adapter,
2712 adap_check_task.work);
2713 const struct adapter_params *p = &adapter->params;
2715 unsigned int v, status, reset;
2717 adapter->check_task_cnt++;
2719 check_link_status(adapter);
2721 /* Accumulate MAC stats if needed */
2722 if (!p->linkpoll_period ||
2723 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2724 p->stats_update_period) {
2725 mac_stats_update(adapter);
2726 adapter->check_task_cnt = 0;
2729 if (p->rev == T3_REV_B2)
2730 check_t3b2_mac(adapter);
2733 * Scan the XGMAC's to check for various conditions which we want to
2734 * monitor in a periodic polling manner rather than via an interrupt
2735 * condition. This is used for conditions which would otherwise flood
2736 * the system with interrupts and we only really need to know that the
2737 * conditions are "happening" ... For each condition we count the
2738 * detection of the condition and reset it for the next polling loop.
2740 for_each_port(adapter, port) {
2741 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2744 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2746 if (cause & F_RXFIFO_OVERFLOW) {
2747 mac->stats.rx_fifo_ovfl++;
2748 reset |= F_RXFIFO_OVERFLOW;
2751 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2755 * We do the same as above for FL_EMPTY interrupts.
2757 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2760 if (status & F_FLEMPTY) {
2761 struct sge_qset *qs = &adapter->sge.qs[0];
/* one empty bit per free list; walk them and bump each counter */
2766 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2770 qs->fl[i].empty += (v & 1);
2778 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2780 /* Schedule the next check update if any port is active. */
2781 spin_lock_irq(&adapter->work_lock);
2782 if (adapter->open_device_map & PORT_MASK)
2783 schedule_chk_task(adapter);
2784 spin_unlock_irq(&adapter->work_lock);
/*
 * db_full_task - work item: notify offload clients the doorbell FIFO is
 * full.  NOTE(review): the container_of() continuation line (presumably
 * naming the db_full_task member) is missing from this excerpt.
 */
2787 static void db_full_task(struct work_struct *work)
2789 struct adapter *adapter = container_of(work, struct adapter,
2792 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
/*
 * db_empty_task - work item: notify offload clients the doorbell FIFO
 * has drained.  NOTE(review): the container_of() continuation line is
 * missing from this excerpt.
 */
2795 static void db_empty_task(struct work_struct *work)
2797 struct adapter *adapter = container_of(work, struct adapter,
2800 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
/*
 * db_drop_task - work item: notify offload clients that a doorbell was
 * dropped, then sleep a randomised ~1-2 ms before (per the comment)
 * re-ringing the driver qset doorbells.
 *
 * NOTE(review): the line that folds the random bytes `r` into `delay`
 * (giving the documented 1000-2023 usec range) and the subsequent
 * doorbell-ring code are missing from this excerpt.
 */
2803 static void db_drop_task(struct work_struct *work)
2805 struct adapter *adapter = container_of(work, struct adapter,
2807 unsigned long delay = 1000;
2810 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2813 * Sleep a while before ringing the driver qset dbs.
2814 * The delay is between 1000-2023 usecs.
2816 get_random_bytes(&r, 2);
2818 set_current_state(TASK_UNINTERRUPTIBLE);
2819 schedule_timeout(usecs_to_jiffies(delay));
/*
 * ext_intr_task - process-context handler for external (PHY) interrupts.
 *
 * PHY handling may sleep (MDIO is protected by a mutex), so the hard-IRQ
 * path only masks F_T3DBG and queues this work.  Here we quiesce the
 * per-port XGMAC interrupts, run the PHY interrupt handler, re-enable
 * the XGMAC interrupts, then ack and re-enable the T3DBG slow interrupt
 * under work_lock (skipped if slow_intr_mask is 0, i.e. the adapter is
 * being shut down).
 */
2824 * Processes external (PHY) interrupts in process context.
2826 static void ext_intr_task(struct work_struct *work)
2828 struct adapter *adapter = container_of(work, struct adapter,
2829 ext_intr_handler_task);
2832 /* Disable link fault interrupts */
2833 for_each_port(adapter, i) {
2834 struct net_device *dev = adapter->port[i];
2835 struct port_info *p = netdev_priv(dev);
2837 t3_xgm_intr_disable(adapter, i);
/* Read-to-clear any latched XGMAC status. */
2838 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2841 /* Re-enable link fault interrupts */
2842 t3_phy_intr_handler(adapter);
2844 for_each_port(adapter, i)
2845 t3_xgm_intr_enable(adapter, i);
2847 /* Now reenable external interrupts */
2848 spin_lock_irq(&adapter->work_lock);
2849 if (adapter->slow_intr_mask) {
2850 adapter->slow_intr_mask |= F_T3DBG;
/* Ack the pending cause before unmasking it. */
2851 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2852 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2853 adapter->slow_intr_mask);
2855 spin_unlock_irq(&adapter->work_lock);
/*
 * t3_os_ext_intr_handler - hard-IRQ entry for external (PHY) interrupts.
 *
 * Masks F_T3DBG in the slow interrupt mask and defers the real work to
 * ext_intr_task on cxgb3_wq, because PHY/MDIO access can sleep.  The
 * slow_intr_mask check (under work_lock) avoids queueing during
 * shutdown, when the mask has been cleared.
 */
2859 * Interrupt-context handler for external (PHY) interrupts.
2861 void t3_os_ext_intr_handler(struct adapter *adapter)
2864 * Schedule a task to handle external interrupts as they may be slow
2865 * and we use a mutex to protect MDIO registers. We disable PHY
2866 * interrupts in the meantime and let the task reenable them when
2869 spin_lock(&adapter->work_lock);
2870 if (adapter->slow_intr_mask) {
2871 adapter->slow_intr_mask &= ~F_T3DBG;
2872 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2873 adapter->slow_intr_mask);
2874 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2876 spin_unlock(&adapter->work_lock);
/*
 * t3_os_link_fault_handler - record a link fault for @port_id.
 *
 * NOTE(review): the statement executed under work_lock is missing from
 * this excerpt — presumably it marks the fault (e.g. sets a link_fault
 * flag on the port_info obtained below); confirm against the full
 * source.
 */
2879 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2881 struct net_device *netdev = adapter->port[port_id];
2882 struct port_info *pi = netdev_priv(netdev);
2884 spin_lock(&adapter->work_lock);
2886 spin_unlock(&adapter->work_lock);
/*
 * t3_adapter_error - quiesce the adapter after a fatal/PCI error.
 * @reset: non-zero to also reset the chip after stopping everything
 * @on_wq: passed through to __cxgb_close (caller runs on a workqueue)
 *
 * Notifies and closes the offload subsystem if active, closes all
 * running ports, stops SGE timers, clears FULL_INIT_DONE and optionally
 * resets the chip; on reset failure the PCI device is disabled.
 * Returns the reset status (0 if no reset requested or it succeeded —
 * return statement not visible in this excerpt).
 */
2889 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2893 if (is_offload(adapter) &&
2894 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2895 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2896 offload_close(&adapter->tdev);
2899 /* Stop all ports */
2900 for_each_port(adapter, i) {
2901 struct net_device *netdev = adapter->port[i];
2903 if (netif_running(netdev))
2904 __cxgb_close(netdev, on_wq);
2907 /* Stop SGE timers */
2908 t3_stop_sge_timers(adapter);
2910 adapter->flags &= ~FULL_INIT_DONE;
2913 ret = t3_reset_adapter(adapter);
2915 pci_disable_device(adapter->pdev);
/*
 * t3_reenable_adapter - bring the PCI function back up after a reset.
 *
 * Re-enables the PCI device, restores bus-master and saved config
 * space, frees stale SGE resources and replays adapter preparation.
 * Returns 0 on success, negative/error on failure (error-path lines are
 * not visible in this excerpt).
 */
2920 static int t3_reenable_adapter(struct adapter *adapter)
2922 if (pci_enable_device(adapter->pdev)) {
2923 dev_err(&adapter->pdev->dev,
2924 "Cannot re-enable PCI device after reset.\n");
2927 pci_set_master(adapter->pdev);
2928 pci_restore_state(adapter->pdev);
/* Re-save state so a later restore sees the post-reset config. */
2929 pci_save_state(adapter->pdev);
2931 /* Free sge resources */
2932 t3_free_sge_resources(adapter);
2934 if (t3_replay_prep_adapter(adapter))
/*
 * t3_resume_ports - restart ports after an adapter reset.
 *
 * Re-opens every port that was running; on failure logs an error (and,
 * per the truncated message, presumably bails out — the rest of the
 * error string and the return are not visible in this excerpt).
 * Finally tells offload clients the adapter is back up.
 */
2942 static void t3_resume_ports(struct adapter *adapter)
2946 /* Restart the ports */
2947 for_each_port(adapter, i) {
2948 struct net_device *netdev = adapter->port[i];
2950 if (netif_running(netdev)) {
2951 if (cxgb_open(netdev)) {
2952 dev_err(&adapter->pdev->dev,
2953 "can't bring device back up"
2960 if (is_offload(adapter) && !ofld_disable)
2961 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
/*
 * fatal_error_task - work item that recovers from a fatal adapter error:
 * ports down, chip reset, ports back up, with the outcome logged.
 */
2965 * processes a fatal error.
2966 * Bring the ports down, reset the chip, bring the ports back up.
2968 static void fatal_error_task(struct work_struct *work)
2970 struct adapter *adapter = container_of(work, struct adapter,
2971 fatal_error_handler_task);
/* reset=1: quiesce AND reset the chip; on_wq=1: we run on a workqueue. */
2975 err = t3_adapter_error(adapter, 1, 1);
2977 err = t3_reenable_adapter(adapter);
2979 t3_resume_ports(adapter);
2981 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
/*
 * t3_fatal_err - immediate response to a fatal hardware error.
 *
 * If the adapter was fully initialised: stop SGE DMA, kill TX/RX on
 * both XGMACs, disable interrupts and queue fatal_error_task (under
 * work_lock so shutdown can't race the queueing).  Always logs the
 * firmware status words from CIM scratch space when readable.
 */
2985 void t3_fatal_err(struct adapter *adapter)
2987 unsigned int fw_status[4];
2989 if (adapter->flags & FULL_INIT_DONE) {
2990 t3_sge_stop_dma(adapter);
2991 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2992 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2993 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2994 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2996 spin_lock(&adapter->work_lock);
2997 t3_intr_disable(adapter);
2998 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2999 spin_unlock(&adapter->work_lock);
3001 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
/* 0xa0: CIM scratch offset holding the four FW status words. */
3002 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3003 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3004 fw_status[0], fw_status[1],
3005 fw_status[2], fw_status[3]);
3009 * t3_io_error_detected - called when PCI error is detected
3010 * @pdev: Pointer to PCI device
3011 * @state: The current pci connection state
3013 * This function is called after a PCI bus error affecting
3014 * this device has been detected.
3016 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3017 pci_channel_state_t state)
3019 struct adapter *adapter = pci_get_drvdata(pdev);
/* Permanent failure: nothing to recover, ask the core to disconnect. */
3021 if (state == pci_channel_io_perm_failure)
3022 return PCI_ERS_RESULT_DISCONNECT;
/* Quiesce without a chip reset (reset=0); not on a workqueue (on_wq=0). */
3024 t3_adapter_error(adapter, 0, 0);
3026 /* Request a slot reset. */
3027 return PCI_ERS_RESULT_NEED_RESET;
3031 * t3_io_slot_reset - called after the pci bus has been reset.
3032 * @pdev: Pointer to PCI device
3034 * Restart the card from scratch, as if from a cold-boot.
3036 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3038 struct adapter *adapter = pci_get_drvdata(pdev);
/* 0 from t3_reenable_adapter means the device came back cleanly. */
3040 if (!t3_reenable_adapter(adapter))
3041 return PCI_ERS_RESULT_RECOVERED;
3043 return PCI_ERS_RESULT_DISCONNECT;
3047 * t3_io_resume - called when traffic can start flowing again.
3048 * @pdev: Pointer to PCI device
3050 * This callback is called when the error recovery driver tells us that
3051 * its OK to resume normal operation.
3053 static void t3_io_resume(struct pci_dev *pdev)
3055 struct adapter *adapter = pci_get_drvdata(pdev);
/* Log the latched PCIe error status for post-mortem before resuming. */
3057 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3058 t3_read_reg(adapter, A_PCIE_PEX_ERR))
3061 t3_resume_ports(adapter);
/* PCI AER recovery callbacks wired into the pci_driver below. */
3065 static const struct pci_error_handlers t3_err_handler = {
3066 .error_detected = t3_io_error_detected,
3067 .slot_reset = t3_io_slot_reset,
3068 .resume = t3_io_resume,
/*
 * set_nqsets - choose how many SGE queue sets each port gets.
 *
 * With MSI-X on rev > 0 silicon, sizes qsets from the available MSI-X
 * vectors (minus one), capped by CPU count, SGE_QSETS and per-port
 * limits; otherwise (lines not visible here) presumably falls back to a
 * single qset per port.  first_qset assignment lines are also missing
 * from this excerpt.
 */
3072 * Set the number of qsets based on the number of CPUs and the number of ports,
3073 * not to exceed the number of available qsets, assuming there are enough qsets
3076 static void set_nqsets(struct adapter *adap)
3079 int num_cpus = netif_get_num_default_rss_queues();
3080 int hwports = adap->params.nports;
/* One MSI-X vector is reserved (async/slow events), rest feed qsets. */
3081 int nqsets = adap->msix_nvectors - 1;
3083 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3085 (hwports * nqsets > SGE_QSETS ||
3086 num_cpus >= nqsets / hwports))
3088 if (nqsets > num_cpus)
3090 if (nqsets < 1 || hwports == 4)
3096 for_each_port(adap, i) {
3097 struct port_info *pi = adap2pinfo(adap, i);
3100 pi->nqsets = nqsets;
3101 j = pi->first_qset + nqsets;
3103 dev_info(&adap->pdev->dev,
3104 "Port %d using %d queue sets.\n", i, nqsets);
/*
 * cxgb_enable_msix - try to enable MSI-X for the adapter.
 *
 * Requests up to SGE_QSETS + 1 vectors (one per qset plus one for slow
 * events), accepting any count down to nports + 1; records the granted
 * vectors in adap->msix_info.  Return statements are not visible in
 * this excerpt (presumably 0 on success, negative on failure).
 */
3108 static int cxgb_enable_msix(struct adapter *adap)
3110 struct msix_entry entries[SGE_QSETS + 1];
3114 vectors = ARRAY_SIZE(entries);
3115 for (i = 0; i < vectors; ++i)
3116 entries[i].entry = i;
/* Minimum usable: one vector per port plus the slow-event vector. */
3118 vectors = pci_enable_msix_range(adap->pdev, entries,
3119 adap->params.nports + 1, vectors);
3123 for (i = 0; i < vectors; ++i)
3124 adap->msix_info[i].vec = entries[i].vector;
3125 adap->msix_nvectors = vectors;
/*
 * print_port_info - log per-port and adapter summary at probe time.
 *
 * Formats the PCI bus description (plain PCI/PCI-X show speed/width,
 * PCIe shows lane count), then prints one line per registered netdev
 * and — once, for the device whose name the adapter adopted — the
 * memory-controller sizes and serial number.
 */
3130 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3132 static const char *pci_variant[] = {
3133 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe branch (condition line not visible): express is "... xN" lanes. */
3140 snprintf(buf, sizeof(buf), "%s x%d",
3141 pci_variant[adap->params.pci.variant],
3142 adap->params.pci.width);
3144 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3145 pci_variant[adap->params.pci.variant],
3146 adap->params.pci.speed, adap->params.pci.width);
3148 for_each_port(adap, i) {
3149 struct net_device *dev = adap->port[i];
3150 const struct port_info *pi = netdev_priv(dev);
/* Skip ports whose netdev failed to register. */
3152 if (!test_bit(i, &adap->registered_device_map))
3154 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3155 ai->desc, pi->phy.desc,
3156 is_offload(adap) ? "R" : "", adap->params.rev, buf,
3157 (adap->flags & USING_MSIX) ? " MSI-X" :
3158 (adap->flags & USING_MSI) ? " MSI" : "");
3159 if (adap->name == dev->name && adap->params.vpd.mclk)
3160 pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3161 adap->name, t3_mc7_size(&adap->cm) >> 20,
3162 t3_mc7_size(&adap->pmtx) >> 20,
3163 t3_mc7_size(&adap->pmrx) >> 20,
3164 adap->params.vpd.sn);
/* net_device_ops for cxgb3 ports; installed on every netdev in init_one. */
3168 static const struct net_device_ops cxgb_netdev_ops = {
3169 .ndo_open = cxgb_open,
3170 .ndo_stop = cxgb_close,
3171 .ndo_start_xmit = t3_eth_xmit,
3172 .ndo_get_stats = cxgb_get_stats,
3173 .ndo_validate_addr = eth_validate_addr,
3174 .ndo_set_rx_mode = cxgb_set_rxmode,
3175 .ndo_eth_ioctl = cxgb_ioctl,
3176 .ndo_siocdevprivate = cxgb_siocdevprivate,
3177 .ndo_change_mtu = cxgb_change_mtu,
3178 .ndo_set_mac_address = cxgb_set_mac_addr,
3179 .ndo_fix_features = cxgb_fix_features,
3180 .ndo_set_features = cxgb_set_features,
3181 #ifdef CONFIG_NET_POLL_CONTROLLER
3182 .ndo_poll_controller = cxgb_netpoll,
/*
 * cxgb3_init_iscsi_mac - derive the port's iSCSI MAC address.
 * Copies the netdev MAC and sets bit 0x80 of byte 3, presumably so the
 * iSCSI MAC is distinct from the NIC MAC — confirm against hardware
 * docs.
 */
3186 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3188 struct port_info *pi = netdev_priv(dev);
3190 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3191 pi->iscsic.mac_addr[3] |= 0x80;
/* Feature masks used when setting up netdev->features / vlan_features. */
3194 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3195 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3196 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
/*
 * init_one - PCI probe for a cxgb3 adapter.
 *
 * Creates the driver workqueue (first probe only), enables the PCI
 * device, claims regions, sets a 64-bit DMA mask, maps BAR0, allocates
 * and initialises the adapter structure and its work items, allocates
 * one netdev per port, prepares the hardware, registers the netdevs
 * (tolerating partial failure as long as one registers), sets up
 * offload, interrupts (MSI-X > MSI > INTx), queue sets and sysfs, and
 * logs port info.  Error paths unwind in reverse via the labels at the
 * bottom.  NOTE(review): many lines (braces, several error checks,
 * labels, returns) are missing from this excerpt; code left
 * byte-identical.
 */
3197 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3200 resource_size_t mmio_start, mmio_len;
3201 const struct adapter_info *ai;
3202 struct adapter *adapter = NULL;
3203 struct port_info *pi;
/* Shared single-threaded workqueue for all cxgb3 deferred work. */
3206 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3208 pr_err("cannot initialize work queue\n");
3213 err = pci_enable_device(pdev);
3215 dev_err(&pdev->dev, "cannot enable PCI device\n");
3219 err = pci_request_regions(pdev, DRV_NAME);
3221 /* Just info, some other driver may have claimed the device. */
3222 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3223 goto out_disable_device;
3226 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3228 dev_err(&pdev->dev, "no usable DMA configuration\n");
3229 goto out_release_regions;
3232 pci_set_master(pdev);
3233 pci_save_state(pdev);
3235 mmio_start = pci_resource_start(pdev, 0);
3236 mmio_len = pci_resource_len(pdev, 0);
3237 ai = t3_get_adapter_info(ent->driver_data);
3239 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3242 goto out_release_regions;
/* Pre-allocated skb guaranteeing TCB-field control messages can't fail. */
3245 adapter->nofail_skb =
3246 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3247 if (!adapter->nofail_skb) {
3248 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3250 goto out_free_adapter;
3253 adapter->regs = ioremap(mmio_start, mmio_len);
3254 if (!adapter->regs) {
3255 dev_err(&pdev->dev, "cannot map device registers\n");
3257 goto out_free_adapter_nofail;
3260 adapter->pdev = pdev;
3261 adapter->name = pci_name(pdev);
3262 adapter->msg_enable = dflt_msg_enable;
3263 adapter->mmio_len = mmio_len;
3265 mutex_init(&adapter->mdio_lock);
3266 spin_lock_init(&adapter->work_lock);
3267 spin_lock_init(&adapter->stats_lock);
3269 INIT_LIST_HEAD(&adapter->adapter_list);
3270 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3271 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3273 INIT_WORK(&adapter->db_full_task, db_full_task);
3274 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3275 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3277 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3279 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3280 struct net_device *netdev;
3282 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3288 SET_NETDEV_DEV(netdev, &pdev->dev);
3290 adapter->port[i] = netdev;
3291 pi = netdev_priv(netdev);
3292 pi->adapter = adapter;
3294 netif_carrier_off(netdev);
3295 netdev->irq = pdev->irq;
3296 netdev->mem_start = mmio_start;
3297 netdev->mem_end = mmio_start + mmio_len - 1;
3298 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3299 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3300 netdev->features |= netdev->hw_features |
3301 NETIF_F_HW_VLAN_CTAG_TX;
3302 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3304 netdev->features |= NETIF_F_HIGHDMA;
3306 netdev->netdev_ops = &cxgb_netdev_ops;
3307 netdev->ethtool_ops = &cxgb_ethtool_ops;
3308 netdev->min_mtu = 81;
3309 netdev->max_mtu = ETH_MAX_MTU;
3310 netdev->dev_port = pi->port_id;
3313 pci_set_drvdata(pdev, adapter);
3314 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3320 * The card is now ready to go. If any errors occur during device
3321 * registration we do not fail the whole card but rather proceed only
3322 * with the ports we manage to register successfully. However we must
3323 * register at least one net device.
3325 for_each_port(adapter, i) {
3326 err = register_netdev(adapter->port[i]);
3328 dev_warn(&pdev->dev,
3329 "cannot register net device %s, skipping\n",
3330 adapter->port[i]->name);
3333 * Change the name we use for messages to the name of
3334 * the first successfully registered interface.
3336 if (!adapter->registered_device_map)
3337 adapter->name = adapter->port[i]->name;
3339 __set_bit(i, &adapter->registered_device_map);
3342 if (!adapter->registered_device_map) {
3343 dev_err(&pdev->dev, "could not register any net devices\n");
3348 for_each_port(adapter, i)
3349 cxgb3_init_iscsi_mac(adapter->port[i]);
3351 /* Driver's ready. Reflect it on LEDs */
3352 t3_led_ready(adapter);
3354 if (is_offload(adapter)) {
3355 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3356 cxgb3_adapter_ofld(adapter);
3359 /* See what interrupts we'll be using */
3360 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3361 adapter->flags |= USING_MSIX;
3362 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3363 adapter->flags |= USING_MSI;
3365 set_nqsets(adapter);
3367 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3370 dev_err(&pdev->dev, "cannot create sysfs group\n");
3374 print_port_info(adapter, ai);
/* --- error unwinding below (labels partially missing in excerpt) --- */
3378 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3381 iounmap(adapter->regs);
3382 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3383 if (adapter->port[i])
3384 free_netdev(adapter->port[i]);
3386 out_free_adapter_nofail:
3387 kfree_skb(adapter->nofail_skb);
3392 out_release_regions:
3393 pci_release_regions(pdev);
3395 pci_disable_device(pdev);
/*
 * remove_one - PCI remove: tear down everything init_one set up, in
 * reverse order — stop SGE, remove sysfs, shut down offload, unregister
 * registered netdevs, stop timers, free SGE resources and IRQ vectors,
 * free netdevs, unmap registers and release the PCI device.
 */
3400 static void remove_one(struct pci_dev *pdev)
3402 struct adapter *adapter = pci_get_drvdata(pdev);
3407 t3_sge_stop(adapter);
3408 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3411 if (is_offload(adapter)) {
3412 cxgb3_adapter_unofld(adapter);
3413 if (test_bit(OFFLOAD_DEVMAP_BIT,
3414 &adapter->open_device_map))
3415 offload_close(&adapter->tdev);
3418 for_each_port(adapter, i)
3419 if (test_bit(i, &adapter->registered_device_map))
3420 unregister_netdev(adapter->port[i]);
3422 t3_stop_sge_timers(adapter);
3423 t3_free_sge_resources(adapter);
3424 cxgb_disable_msi(adapter);
3426 for_each_port(adapter, i)
3427 if (adapter->port[i])
3428 free_netdev(adapter->port[i]);
3430 iounmap(adapter->regs);
3431 kfree_skb(adapter->nofail_skb);
3433 pci_release_regions(pdev);
3434 pci_disable_device(pdev);
/* cxgb3 PCI driver; .name/.probe lines are not visible in this excerpt. */
3438 static struct pci_driver driver = {
3440 .id_table = cxgb3_pci_tbl,
3442 .remove = remove_one,
3443 .err_handler = &t3_err_handler,
/* Module init: set up the offload layer, then register the PCI driver. */
3446 static int __init cxgb3_init_module(void)
3450 cxgb3_offload_init();
3452 ret = pci_register_driver(&driver);
/*
 * Module exit: unregister the PCI driver, then destroy the workqueue
 * (the NULL check before destroy is not visible in this excerpt).
 */
3456 static void __exit cxgb3_cleanup_module(void)
3458 pci_unregister_driver(&driver);
3460 destroy_workqueue(cxgb3_wq);
/* Register module entry/exit points. */
3463 module_init(cxgb3_init_module);
3464 module_exit(cxgb3_cleanup_module);