/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

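/*
 * TX FIFO drain helpers, used around link faults.  enable_tx_fifo_drain()
 * sets F_ENDROPPKT so the MAC discards egress packets, which (presumably as
 * a hardware workaround) lets the TX FIFO drain while the link is down;
 * disable_tx_fifo_drain() clears the bit once the link is back up.
 */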
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

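/**
 *      t3_os_link_fault - handle a link fault notification
 *      @adap: the adapter associated with the fault
 *      @port_id: the port index affected by the fault
 *      @state: non-zero if the link has recovered, zero on a new fault
 *
 *      OS-dependent handler for link fault events.  On recovery it clears
 *      any latched local faults and re-enables the MAC TX path; on a fault
 *      it marks the carrier off and lets the TX FIFO drain.
 */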
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

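/*
 * Poll until qset 0's response queue has seen n more offload packets than
 * init_cnt, sleeping 10ms between checks and giving up after ~100ms.
 */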
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

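/*
 * Initialize the TP parity-protected memories by writing every SMT, L2T,
 * and routing-table entry through management work requests, then send a
 * final CPL_SET_TCB_FIELD and wait for all replies so the writes are known
 * to have completed.
 */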
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

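/*
 * Ring the doorbell of every initialized TX queue's egress context so the
 * SGE re-examines any descriptors that were posted while doorbells were
 * unavailable (e.g. across an adapter reset).
 */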
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

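/*
 * sysfs glue: run a per-netdev formatter or setter under the RTNL lock so
 * the attribute handlers cannot race with ioctls that reconfigure or shut
 * down the device.
 */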
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

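/*
 * Show/store handlers for the per-port TX scheduler attributes
 * (sched0..sched7).  Rates are reported in Kbps, derived from the core
 * clock and the bytes/cycles fields read back from TP, and are configured
 * via t3_config_sched().
 */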
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

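/*
 * Program one source MAC table (SMT) entry with the port's LAN and iSCSI
 * MAC addresses by sending a CPL_SMT_WRITE_REQ through the offload queue;
 * init_smt() below does this for every port.
 */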
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

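/*
 * Send a management work request that binds a TX packet-scheduler queue set
 * to a port, falling back to the preallocated nofail_skb under memory
 * pressure.
 */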
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

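/*
 * Bind every port's queue sets to that port's TX scheduler; returns the
 * last error encountered, if any.
 */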
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

/*(DEBLOBBED)*/
#define FW_FNAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
#define TPSRAM_NAME "/*(DEBLOBBED)*/"
#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/"
#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/

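/*
 * Map an EDC firmware index to the corresponding (deblobbed) firmware file
 * name, or NULL for an unknown index.
 */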
static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

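/*
 * Load a PHY EDC firmware image, verify its additive checksum over 32-bit
 * words (the sum must come to 0xffffffff), and unpack the payload into the
 * PHY's cache of 16-bit words.
 */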
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = reject_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check the size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

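/*
 * Fetch the main T3 firmware image via the firmware loader and write it to
 * the adapter with t3_load_fw(), logging the outcome against the expected
 * driver firmware version.
 */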
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = reject_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

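/*
 * Translate the hardware revision into the character used in firmware file
 * names ('b' or 'c'); returns 0 for revisions with no per-revision image.
 */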
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

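/*
 * Load the protocol-engine (TP) SRAM image matching the hardware revision,
 * validate it, and program it into the adapter.
 */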
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = reject_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

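/*
 * Apply the netdev's VLAN RX acceleration setting to the hardware.  Rev 0
 * parts have a single control shared by all ports, so acceleration stays
 * enabled there if any port wants it.
 */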
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: the adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

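/*
 * Schedule the periodic adapter check task, using the link polling period
 * when link polling is enabled and the statistics update period otherwise.
 */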
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

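/*
 * Bring up offload operation: mark the offload device open, activate the
 * offload layer, program the MTU tables and the SMT, and notify registered
 * ULP clients.
 */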
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

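/*
 * Tear down offload operation: detach registered clients, remove the
 * offload sysfs attributes, and bring the adapter down if no ports remain
 * open.
 */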
static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

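/*
 * ndo_open handler: brings the adapter up on first use, enables offload if
 * available, and starts the port's queues and link.
 */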
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

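/*
 * Common close path for ndo_stop and error-recovery callers; the on_wq flag
 * tells cxgb_down() not to flush cxgb3_wq when (presumably) the caller is
 * itself running on that workqueue.
 */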
static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

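/*
 * ndo_get_stats handler: snapshot the MAC statistics under stats_lock and
 * fold them into the standard net_device_stats counters.
 */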
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
1635                          G_FW_VERSION_MICRO(fw_vers),
1636                          G_TP_VERSION_MAJOR(tp_vers),
1637                          G_TP_VERSION_MINOR(tp_vers),
1638                          G_TP_VERSION_MICRO(tp_vers));
1639 }
1640
1641 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1642 {
1643         if (stringset == ETH_SS_STATS)
1644                 memcpy(data, stats_strings, sizeof(stats_strings));
1645 }
1646
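     /* Sum one per-qset SGE statistic across all queue sets owned by a port */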
1647 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1648                                             struct port_info *p, int idx)
1649 {
1650         int i;
1651         unsigned long tot = 0;
1652
1653         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1654                 tot += adapter->sge.qs[i].port_stats[idx];
1655         return tot;
1656 }
1657
1658 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1659                       u64 *data)
1660 {
1661         struct port_info *pi = netdev_priv(dev);
1662         struct adapter *adapter = pi->adapter;
1663         const struct mac_stats *s;
1664
1665         spin_lock(&adapter->stats_lock);
1666         s = t3_mac_update_stats(&pi->mac);
1667         spin_unlock(&adapter->stats_lock);
1668
1669         *data++ = s->tx_octets;
1670         *data++ = s->tx_frames;
1671         *data++ = s->tx_mcast_frames;
1672         *data++ = s->tx_bcast_frames;
1673         *data++ = s->tx_pause;
1674         *data++ = s->tx_underrun;
1675         *data++ = s->tx_fifo_urun;
1676
1677         *data++ = s->tx_frames_64;
1678         *data++ = s->tx_frames_65_127;
1679         *data++ = s->tx_frames_128_255;
1680         *data++ = s->tx_frames_256_511;
1681         *data++ = s->tx_frames_512_1023;
1682         *data++ = s->tx_frames_1024_1518;
1683         *data++ = s->tx_frames_1519_max;
1684
1685         *data++ = s->rx_octets;
1686         *data++ = s->rx_frames;
1687         *data++ = s->rx_mcast_frames;
1688         *data++ = s->rx_bcast_frames;
1689         *data++ = s->rx_pause;
1690         *data++ = s->rx_fcs_errs;
1691         *data++ = s->rx_symbol_errs;
1692         *data++ = s->rx_short;
1693         *data++ = s->rx_jabber;
1694         *data++ = s->rx_too_long;
1695         *data++ = s->rx_fifo_ovfl;
1696
1697         *data++ = s->rx_frames_64;
1698         *data++ = s->rx_frames_65_127;
1699         *data++ = s->rx_frames_128_255;
1700         *data++ = s->rx_frames_256_511;
1701         *data++ = s->rx_frames_512_1023;
1702         *data++ = s->rx_frames_1024_1518;
1703         *data++ = s->rx_frames_1519_max;
1704
1705         *data++ = pi->phy.fifo_errors;
1706
1707         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1708         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1709         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1710         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1711         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
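             /* LroAggregated, LroFlushed and LroNoDesc are always reported as zero */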
1712         *data++ = 0;
1713         *data++ = 0;
1714         *data++ = 0;
1715         *data++ = s->rx_cong_drops;
1716
1717         *data++ = s->num_toggled;
1718         *data++ = s->num_resets;
1719
1720         *data++ = s->link_faults;
1721 }
1722
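     /*
      * Read the register range [start, end] (inclusive) into the snapshot
      * buffer at the same byte offsets.
      */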
1723 static inline void reg_block_dump(struct adapter *ap, void *buf,
1724                                   unsigned int start, unsigned int end)
1725 {
1726         u32 *p = buf + start;
1727
1728         for (; start <= end; start += sizeof(u32))
1729                 *p++ = t3_read_reg(ap, start);
1730 }
1731
1732 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1733                      void *buf)
1734 {
1735         struct port_info *pi = netdev_priv(dev);
1736         struct adapter *ap = pi->adapter;
1737
1738         /*
1739          * Version scheme:
1740          * bits 0..9: chip version
1741          * bits 10..15: chip revision
1742          * bit 31: set for PCIe cards
1743          */
1744         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1745
1746         /*
1747          * We skip the MAC statistics registers because they are clear-on-read.
1748          * Also reading multi-register stats would need to synchronize with the
1749          * periodic mac stats accumulation.  Hard to justify the complexity.
1750          */
1751         memset(buf, 0, T3_REGMAP_SIZE);
1752         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1753         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1754         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1755         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1756         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1757         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1758                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1759         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1760                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1761 }
1762
1763 static int restart_autoneg(struct net_device *dev)
1764 {
1765         struct port_info *p = netdev_priv(dev);
1766
1767         if (!netif_running(dev))
1768                 return -EAGAIN;
1769         if (p->link_config.autoneg != AUTONEG_ENABLE)
1770                 return -EINVAL;
1771         p->phy.ops->autoneg_restart(&p->phy);
1772         return 0;
1773 }
1774
1775 static int set_phys_id(struct net_device *dev,
1776                        enum ethtool_phys_id_state state)
1777 {
1778         struct port_info *pi = netdev_priv(dev);
1779         struct adapter *adapter = pi->adapter;
1780
1781         switch (state) {
1782         case ETHTOOL_ID_ACTIVE:
1783                 return 1;       /* cycle on/off once per second */
1784
1785         case ETHTOOL_ID_OFF:
1786                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1787                 break;
1788
1789         case ETHTOOL_ID_ON:
1790         case ETHTOOL_ID_INACTIVE:
1791                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1792                          F_GPIO0_OUT_VAL);
1793         }
1794
1795         return 0;
1796 }
1797
1798 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1799 {
1800         struct port_info *p = netdev_priv(dev);
1801
1802         cmd->supported = p->link_config.supported;
1803         cmd->advertising = p->link_config.advertising;
1804
1805         if (netif_carrier_ok(dev)) {
1806                 ethtool_cmd_speed_set(cmd, p->link_config.speed);
1807                 cmd->duplex = p->link_config.duplex;
1808         } else {
1809                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1810                 cmd->duplex = DUPLEX_UNKNOWN;
1811         }
1812
1813         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1814         cmd->phy_address = p->phy.mdio.prtad;
1815         cmd->transceiver = XCVR_EXTERNAL;
1816         cmd->autoneg = p->link_config.autoneg;
1817         cmd->maxtxpkt = 0;
1818         cmd->maxrxpkt = 0;
1819         return 0;
1820 }
1821
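     /*
      * Translate a speed/duplex pair into the corresponding SUPPORTED_*
      * capability bit, or 0 if the combination is not supported.
      */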
1822 static int speed_duplex_to_caps(int speed, int duplex)
1823 {
1824         int cap = 0;
1825
1826         switch (speed) {
1827         case SPEED_10:
1828                 if (duplex == DUPLEX_FULL)
1829                         cap = SUPPORTED_10baseT_Full;
1830                 else
1831                         cap = SUPPORTED_10baseT_Half;
1832                 break;
1833         case SPEED_100:
1834                 if (duplex == DUPLEX_FULL)
1835                         cap = SUPPORTED_100baseT_Full;
1836                 else
1837                         cap = SUPPORTED_100baseT_Half;
1838                 break;
1839         case SPEED_1000:
1840                 if (duplex == DUPLEX_FULL)
1841                         cap = SUPPORTED_1000baseT_Full;
1842                 else
1843                         cap = SUPPORTED_1000baseT_Half;
1844                 break;
1845         case SPEED_10000:
1846                 if (duplex == DUPLEX_FULL)
1847                         cap = SUPPORTED_10000baseT_Full;
1848         }
1849         return cap;
1850 }
1851
1852 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1853                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1854                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1855                       ADVERTISED_10000baseT_Full)
1856
1857 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1858 {
1859         struct port_info *p = netdev_priv(dev);
1860         struct link_config *lc = &p->link_config;
1861
1862         if (!(lc->supported & SUPPORTED_Autoneg)) {
1863                 /*
1864                  * PHY offers a single speed/duplex.  See if that's what's
1865                  * being requested.
1866                  */
1867                 if (cmd->autoneg == AUTONEG_DISABLE) {
1868                         u32 speed = ethtool_cmd_speed(cmd);
1869                         int cap = speed_duplex_to_caps(speed, cmd->duplex);
1870                         if (lc->supported & cap)
1871                                 return 0;
1872                 }
1873                 return -EINVAL;
1874         }
1875
1876         if (cmd->autoneg == AUTONEG_DISABLE) {
1877                 u32 speed = ethtool_cmd_speed(cmd);
1878                 int cap = speed_duplex_to_caps(speed, cmd->duplex);
1879
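                     /* forcing 1 Gb/s is not supported (1000BASE-T mandates autoneg) */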
1880                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1881                         return -EINVAL;
1882                 lc->requested_speed = speed;
1883                 lc->requested_duplex = cmd->duplex;
1884                 lc->advertising = 0;
1885         } else {
1886                 cmd->advertising &= ADVERTISED_MASK;
1887                 cmd->advertising &= lc->supported;
1888                 if (!cmd->advertising)
1889                         return -EINVAL;
1890                 lc->requested_speed = SPEED_INVALID;
1891                 lc->requested_duplex = DUPLEX_INVALID;
1892                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1893         }
1894         lc->autoneg = cmd->autoneg;
1895         if (netif_running(dev))
1896                 t3_link_start(&p->phy, &p->mac, lc);
1897         return 0;
1898 }
1899
1900 static void get_pauseparam(struct net_device *dev,
1901                            struct ethtool_pauseparam *epause)
1902 {
1903         struct port_info *p = netdev_priv(dev);
1904
1905         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1906         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1907         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1908 }
1909
1910 static int set_pauseparam(struct net_device *dev,
1911                           struct ethtool_pauseparam *epause)
1912 {
1913         struct port_info *p = netdev_priv(dev);
1914         struct link_config *lc = &p->link_config;
1915
1916         if (epause->autoneg == AUTONEG_DISABLE)
1917                 lc->requested_fc = 0;
1918         else if (lc->supported & SUPPORTED_Autoneg)
1919                 lc->requested_fc = PAUSE_AUTONEG;
1920         else
1921                 return -EINVAL;
1922
1923         if (epause->rx_pause)
1924                 lc->requested_fc |= PAUSE_RX;
1925         if (epause->tx_pause)
1926                 lc->requested_fc |= PAUSE_TX;
1927         if (lc->autoneg == AUTONEG_ENABLE) {
1928                 if (netif_running(dev))
1929                         t3_link_start(&p->phy, &p->mac, lc);
1930         } else {
1931                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1932                 if (netif_running(dev))
1933                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1934         }
1935         return 0;
1936 }
1937
1938 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1939 {
1940         struct port_info *pi = netdev_priv(dev);
1941         struct adapter *adapter = pi->adapter;
1942         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1943
1944         e->rx_max_pending = MAX_RX_BUFFERS;
1945         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1946         e->tx_max_pending = MAX_TXQ_ENTRIES;
1947
1948         e->rx_pending = q->fl_size;
1949         e->rx_mini_pending = q->rspq_size;
1950         e->rx_jumbo_pending = q->jumbo_size;
1951         e->tx_pending = q->txq_size[0];
1952 }
1953
1954 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1955 {
1956         struct port_info *pi = netdev_priv(dev);
1957         struct adapter *adapter = pi->adapter;
1958         struct qset_params *q;
1959         int i;
1960
1961         if (e->rx_pending > MAX_RX_BUFFERS ||
1962             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1963             e->tx_pending > MAX_TXQ_ENTRIES ||
1964             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1965             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1966             e->rx_pending < MIN_FL_ENTRIES ||
1967             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1968             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1969                 return -EINVAL;
1970
1971         if (adapter->flags & FULL_INIT_DONE)
1972                 return -EBUSY;
1973
1974         q = &adapter->params.sge.qset[pi->first_qset];
1975         for (i = 0; i < pi->nqsets; ++i, ++q) {
1976                 q->rspq_size = e->rx_mini_pending;
1977                 q->fl_size = e->rx_pending;
1978                 q->jumbo_size = e->rx_jumbo_pending;
1979                 q->txq_size[0] = e->tx_pending;
1980                 q->txq_size[1] = e->tx_pending;
1981                 q->txq_size[2] = e->tx_pending;
1982         }
1983         return 0;
1984 }
1985
1986 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1987 {
1988         struct port_info *pi = netdev_priv(dev);
1989         struct adapter *adapter = pi->adapter;
1990         struct qset_params *qsp;
1991         struct sge_qset *qs;
1992         int i;
1993
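             /*
              * The SGE holdoff timer appears to be programmed in 100ns ticks,
              * hence the factor of 10 when validating against M_NEWTIMER.
              */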
1994         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1995                 return -EINVAL;
1996
1997         for (i = 0; i < pi->nqsets; i++) {
1998                 qsp = &adapter->params.sge.qset[i];
1999                 qs = &adapter->sge.qs[i];
2000                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2001                 t3_update_qset_coalesce(qs, qsp);
2002         }
2003
2004         return 0;
2005 }
2006
2007 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2008 {
2009         struct port_info *pi = netdev_priv(dev);
2010         struct adapter *adapter = pi->adapter;
2011         struct qset_params *q = adapter->params.sge.qset;
2012
2013         c->rx_coalesce_usecs = q->coalesce_usecs;
2014         return 0;
2015 }
2016
2017 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2018                       u8 *data)
2019 {
2020         struct port_info *pi = netdev_priv(dev);
2021         struct adapter *adapter = pi->adapter;
2022         int i, err = 0;
2023
2024         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2025         if (!buf)
2026                 return -ENOMEM;
2027
2028         e->magic = EEPROM_MAGIC;
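             /* EEPROM reads are 32-bit, so round the window out to 4-byte alignment */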
2029         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2030                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2031
2032         if (!err)
2033                 memcpy(data, buf + e->offset, e->len);
2034         kfree(buf);
2035         return err;
2036 }
2037
2038 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2039                       u8 *data)
2040 {
2041         struct port_info *pi = netdev_priv(dev);
2042         struct adapter *adapter = pi->adapter;
2043         u32 aligned_offset, aligned_len;
2044         __le32 *p;
2045         u8 *buf;
2046         int err;
2047
2048         if (eeprom->magic != EEPROM_MAGIC)
2049                 return -EINVAL;
2050
2051         aligned_offset = eeprom->offset & ~3;
2052         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2053
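             /* writes are 32-bit: read-modify-write any partial words at the edges */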
2054         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2055                 buf = kmalloc(aligned_len, GFP_KERNEL);
2056                 if (!buf)
2057                         return -ENOMEM;
2058                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2059                 if (!err && aligned_len > 4)
2060                         err = t3_seeprom_read(adapter,
2061                                               aligned_offset + aligned_len - 4,
2062                                               (__le32 *)&buf[aligned_len - 4]);
2063                 if (err)
2064                         goto out;
2065                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2066         } else
2067                 buf = data;
2068
2069         err = t3_seeprom_wp(adapter, 0);
2070         if (err)
2071                 goto out;
2072
2073         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2074                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2075                 aligned_offset += 4;
2076         }
2077
2078         if (!err)
2079                 err = t3_seeprom_wp(adapter, 1);
2080 out:
2081         if (buf != data)
2082                 kfree(buf);
2083         return err;
2084 }
2085
2086 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2087 {
2088         wol->supported = 0;
2089         wol->wolopts = 0;
2090         memset(&wol->sopass, 0, sizeof(wol->sopass));
2091 }
2092
2093 static const struct ethtool_ops cxgb_ethtool_ops = {
2094         .get_settings = get_settings,
2095         .set_settings = set_settings,
2096         .get_drvinfo = get_drvinfo,
2097         .get_msglevel = get_msglevel,
2098         .set_msglevel = set_msglevel,
2099         .get_ringparam = get_sge_param,
2100         .set_ringparam = set_sge_param,
2101         .get_coalesce = get_coalesce,
2102         .set_coalesce = set_coalesce,
2103         .get_eeprom_len = get_eeprom_len,
2104         .get_eeprom = get_eeprom,
2105         .set_eeprom = set_eeprom,
2106         .get_pauseparam = get_pauseparam,
2107         .set_pauseparam = set_pauseparam,
2108         .get_link = ethtool_op_get_link,
2109         .get_strings = get_strings,
2110         .set_phys_id = set_phys_id,
2111         .nway_reset = restart_autoneg,
2112         .get_sset_count = get_sset_count,
2113         .get_ethtool_stats = get_stats,
2114         .get_regs_len = get_regs_len,
2115         .get_regs = get_regs,
2116         .get_wol = get_wol,
2117 };
2118
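     /*
      * Bounds-check an ioctl parameter.  A negative value means "leave this
      * setting unchanged" and is always accepted.
      */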
2119 static int in_range(int val, int lo, int hi)
2120 {
2121         return val < 0 || (val <= hi && val >= lo);
2122 }
2123
2124 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2125 {
2126         struct port_info *pi = netdev_priv(dev);
2127         struct adapter *adapter = pi->adapter;
2128         u32 cmd;
2129         int ret;
2130
2131         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2132                 return -EFAULT;
2133
2134         switch (cmd) {
2135         case CHELSIO_SET_QSET_PARAMS:{
2136                 int i;
2137                 struct qset_params *q;
2138                 struct ch_qset_params t;
2139                 int q1 = pi->first_qset;
2140                 int nqsets = pi->nqsets;
2141
2142                 if (!capable(CAP_NET_ADMIN))
2143                         return -EPERM;
2144                 if (copy_from_user(&t, useraddr, sizeof(t)))
2145                         return -EFAULT;
2146                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2147                         return -EINVAL;
2148                 if (t.qset_idx >= SGE_QSETS)
2149                         return -EINVAL;
2150                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2151                     !in_range(t.cong_thres, 0, 255) ||
2152                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2153                               MAX_TXQ_ENTRIES) ||
2154                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2155                               MAX_TXQ_ENTRIES) ||
2156                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2157                               MAX_CTRL_TXQ_ENTRIES) ||
2158                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2159                               MAX_RX_BUFFERS) ||
2160                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2161                               MAX_RX_JUMBO_BUFFERS) ||
2162                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2163                               MAX_RSPQ_ENTRIES))
2164                         return -EINVAL;
2165
2166                 if ((adapter->flags & FULL_INIT_DONE) &&
2167                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2168                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2169                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2170                         t.polling >= 0 || t.cong_thres >= 0))
2171                         return -EBUSY;
2172
2173                 /* Allow setting of any available qset when offload is enabled */
2174                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2175                         q1 = 0;
2176                         for_each_port(adapter, i) {
2177                                 pi = adap2pinfo(adapter, i);
2178                                 nqsets += pi->first_qset + pi->nqsets;
2179                         }
2180                 }
2181
2182                 if (t.qset_idx < q1)
2183                         return -EINVAL;
2184                 if (t.qset_idx > q1 + nqsets - 1)
2185                         return -EINVAL;
2186
2187                 q = &adapter->params.sge.qset[t.qset_idx];
2188
2189                 if (t.rspq_size >= 0)
2190                         q->rspq_size = t.rspq_size;
2191                 if (t.fl_size[0] >= 0)
2192                         q->fl_size = t.fl_size[0];
2193                 if (t.fl_size[1] >= 0)
2194                         q->jumbo_size = t.fl_size[1];
2195                 if (t.txq_size[0] >= 0)
2196                         q->txq_size[0] = t.txq_size[0];
2197                 if (t.txq_size[1] >= 0)
2198                         q->txq_size[1] = t.txq_size[1];
2199                 if (t.txq_size[2] >= 0)
2200                         q->txq_size[2] = t.txq_size[2];
2201                 if (t.cong_thres >= 0)
2202                         q->cong_thres = t.cong_thres;
2203                 if (t.intr_lat >= 0) {
2204                         struct sge_qset *qs =
2205                                 &adapter->sge.qs[t.qset_idx];
2206
2207                         q->coalesce_usecs = t.intr_lat;
2208                         t3_update_qset_coalesce(qs, q);
2209                 }
2210                 if (t.polling >= 0) {
2211                         if (adapter->flags & USING_MSIX)
2212                                 q->polling = t.polling;
2213                         else {
2214                                 /* No polling with INTx for T3A */
2215                                 if (adapter->params.rev == 0 &&
2216                                         !(adapter->flags & USING_MSI))
2217                                         t.polling = 0;
2218
2219                                 for (i = 0; i < SGE_QSETS; i++) {
2220                                         q = &adapter->params.sge.qset[i];
2222                                         q->polling = t.polling;
2223                                 }
2224                         }
2225                 }
2226
2227                 if (t.lro >= 0) {
2228                         if (t.lro)
2229                                 dev->wanted_features |= NETIF_F_GRO;
2230                         else
2231                                 dev->wanted_features &= ~NETIF_F_GRO;
2232                         netdev_update_features(dev);
2233                 }
2234
2235                 break;
2236         }
2237         case CHELSIO_GET_QSET_PARAMS:{
2238                 struct qset_params *q;
2239                 struct ch_qset_params t;
2240                 int q1 = pi->first_qset;
2241                 int nqsets = pi->nqsets;
2242                 int i;
2243
2244                 if (copy_from_user(&t, useraddr, sizeof(t)))
2245                         return -EFAULT;
2246
2247                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2248                         return -EINVAL;
2249
2250                 /* Display qsets for all ports when offload is enabled */
2251                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2252                         q1 = 0;
2253                         for_each_port(adapter, i) {
2254                                 pi = adap2pinfo(adapter, i);
2255                                 nqsets = pi->first_qset + pi->nqsets;
2256                         }
2257                 }
2258
2259                 if (t.qset_idx >= nqsets)
2260                         return -EINVAL;
2261                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2262
2263                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2264                 t.rspq_size = q->rspq_size;
2265                 t.txq_size[0] = q->txq_size[0];
2266                 t.txq_size[1] = q->txq_size[1];
2267                 t.txq_size[2] = q->txq_size[2];
2268                 t.fl_size[0] = q->fl_size;
2269                 t.fl_size[1] = q->jumbo_size;
2270                 t.polling = q->polling;
2271                 t.lro = !!(dev->features & NETIF_F_GRO);
2272                 t.intr_lat = q->coalesce_usecs;
2273                 t.cong_thres = q->cong_thres;
2274                 t.qnum = q1;
2275
2276                 if (adapter->flags & USING_MSIX)
2277                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2278                 else
2279                         t.vector = adapter->pdev->irq;
2280
2281                 if (copy_to_user(useraddr, &t, sizeof(t)))
2282                         return -EFAULT;
2283                 break;
2284         }
2285         case CHELSIO_SET_QSET_NUM:{
2286                 struct ch_reg edata;
2287                 unsigned int i, first_qset = 0, other_qsets = 0;
2288
2289                 if (!capable(CAP_NET_ADMIN))
2290                         return -EPERM;
2291                 if (adapter->flags & FULL_INIT_DONE)
2292                         return -EBUSY;
2293                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2294                         return -EFAULT;
2295                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2296                         return -EINVAL;
2297                 if (edata.val < 1 ||
2298                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2299                         return -EINVAL;
2300
2301                 for_each_port(adapter, i)
2302                         if (adapter->port[i] && adapter->port[i] != dev)
2303                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2304
2305                 if (edata.val + other_qsets > SGE_QSETS)
2306                         return -EINVAL;
2307
2308                 pi->nqsets = edata.val;
2309
2310                 for_each_port(adapter, i)
2311                         if (adapter->port[i]) {
2312                                 pi = adap2pinfo(adapter, i);
2313                                 pi->first_qset = first_qset;
2314                                 first_qset += pi->nqsets;
2315                         }
2316                 break;
2317         }
2318         case CHELSIO_GET_QSET_NUM:{
2319                 struct ch_reg edata;
2320
2321                 memset(&edata, 0, sizeof(struct ch_reg));
2322
2323                 edata.cmd = CHELSIO_GET_QSET_NUM;
2324                 edata.val = pi->nqsets;
2325                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2326                         return -EFAULT;
2327                 break;
2328         }
2329         case CHELSIO_LOAD_FW:{
2330                 u8 *fw_data;
2331                 struct ch_mem_range t;
2332
2333                 if (!capable(CAP_SYS_RAWIO))
2334                         return -EPERM;
2335                 if (copy_from_user(&t, useraddr, sizeof(t)))
2336                         return -EFAULT;
2337                 if (t.cmd != CHELSIO_LOAD_FW)
2338                         return -EINVAL;
2339                 /* TODO: sanity-check t.len before the copy */
2340                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2341                 if (IS_ERR(fw_data))
2342                         return PTR_ERR(fw_data);
2343
2344                 ret = t3_load_fw(adapter, fw_data, t.len);
2345                 kfree(fw_data);
2346                 if (ret)
2347                         return ret;
2348                 break;
2349         }
2350         case CHELSIO_SETMTUTAB:{
2351                 struct ch_mtus m;
2352                 int i;
2353
2354                 if (!is_offload(adapter))
2355                         return -EOPNOTSUPP;
2356                 if (!capable(CAP_NET_ADMIN))
2357                         return -EPERM;
2358                 if (offload_running(adapter))
2359                         return -EBUSY;
2360                 if (copy_from_user(&m, useraddr, sizeof(m)))
2361                         return -EFAULT;
2362                 if (m.cmd != CHELSIO_SETMTUTAB)
2363                         return -EINVAL;
2364                 if (m.nmtus != NMTUS)
2365                         return -EINVAL;
2366                 if (m.mtus[0] < 81)     /* accommodate SACK */
2367                         return -EINVAL;
2368
2369                 /* MTUs must be in ascending order */
2370                 for (i = 1; i < NMTUS; ++i)
2371                         if (m.mtus[i] < m.mtus[i - 1])
2372                                 return -EINVAL;
2373
2374                 memcpy(adapter->params.mtus, m.mtus,
2375                         sizeof(adapter->params.mtus));
2376                 break;
2377         }
2378         case CHELSIO_GET_PM:{
2379                 struct tp_params *p = &adapter->params.tp;
2380                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2381
2382                 if (!is_offload(adapter))
2383                         return -EOPNOTSUPP;
2384                 m.tx_pg_sz = p->tx_pg_size;
2385                 m.tx_num_pg = p->tx_num_pgs;
2386                 m.rx_pg_sz = p->rx_pg_size;
2387                 m.rx_num_pg = p->rx_num_pgs;
2388                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2389                 if (copy_to_user(useraddr, &m, sizeof(m)))
2390                         return -EFAULT;
2391                 break;
2392         }
2393         case CHELSIO_SET_PM:{
2394                 struct ch_pm m;
2395                 struct tp_params *p = &adapter->params.tp;
2396
2397                 if (!is_offload(adapter))
2398                         return -EOPNOTSUPP;
2399                 if (!capable(CAP_NET_ADMIN))
2400                         return -EPERM;
2401                 if (adapter->flags & FULL_INIT_DONE)
2402                         return -EBUSY;
2403                 if (copy_from_user(&m, useraddr, sizeof(m)))
2404                         return -EFAULT;
2405                 if (m.cmd != CHELSIO_SET_PM)
2406                         return -EINVAL;
2407                 if (!is_power_of_2(m.rx_pg_sz) ||
2408                         !is_power_of_2(m.tx_pg_sz))
2409                         return -EINVAL; /* not power of 2 */
2410                 if (!(m.rx_pg_sz & 0x14000))
2411                         return -EINVAL; /* not 16KB or 64KB */
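                     /* tx page size must be 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */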
2412                 if (!(m.tx_pg_sz & 0x1554000))
2413                         return -EINVAL;
2414                 if (m.tx_num_pg == -1)
2415                         m.tx_num_pg = p->tx_num_pgs;
2416                 if (m.rx_num_pg == -1)
2417                         m.rx_num_pg = p->rx_num_pgs;
2418                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2419                         return -EINVAL;
2420                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2421                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2422                         return -EINVAL;
2423                 p->rx_pg_size = m.rx_pg_sz;
2424                 p->tx_pg_size = m.tx_pg_sz;
2425                 p->rx_num_pgs = m.rx_num_pg;
2426                 p->tx_num_pgs = m.tx_num_pg;
2427                 break;
2428         }
2429         case CHELSIO_GET_MEM:{
2430                 struct ch_mem_range t;
2431                 struct mc7 *mem;
2432                 u64 buf[32];
2433
2434                 if (!is_offload(adapter))
2435                         return -EOPNOTSUPP;
2436                 if (!capable(CAP_NET_ADMIN))
2437                         return -EPERM;
2438                 if (!(adapter->flags & FULL_INIT_DONE))
2439                         return -EIO;    /* need the memory controllers */
2440                 if (copy_from_user(&t, useraddr, sizeof(t)))
2441                         return -EFAULT;
2442                 if (t.cmd != CHELSIO_GET_MEM)
2443                         return -EINVAL;
2444                 if ((t.addr & 7) || (t.len & 7))
2445                         return -EINVAL;
2446                 if (t.mem_id == MEM_CM)
2447                         mem = &adapter->cm;
2448                 else if (t.mem_id == MEM_PMRX)
2449                         mem = &adapter->pmrx;
2450                 else if (t.mem_id == MEM_PMTX)
2451                         mem = &adapter->pmtx;
2452                 else
2453                         return -EINVAL;
2454
2455                 /*
2456                  * Version scheme:
2457                  * bits 0..9: chip version
2458                  * bits 10..15: chip revision
2459                  */
2460                 t.version = 3 | (adapter->params.rev << 10);
2461                 if (copy_to_user(useraddr, &t, sizeof(t)))
2462                         return -EFAULT;
2463
2464                 /*
2465                  * Read 256 bytes at a time as len can be large and we don't
2466                  * want to use huge intermediate buffers.
2467                  */
2468                 useraddr += sizeof(t);  /* advance to start of buffer */
2469                 while (t.len) {
2470                         unsigned int chunk =
2471                                 min_t(unsigned int, t.len, sizeof(buf));
2472
2473                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2476                         if (ret)
2477                                 return ret;
2478                         if (copy_to_user(useraddr, buf, chunk))
2479                                 return -EFAULT;
2480                         useraddr += chunk;
2481                         t.addr += chunk;
2482                         t.len -= chunk;
2483                 }
2484                 break;
2485         }
2486         case CHELSIO_SET_TRACE_FILTER:{
2487                 struct ch_trace t;
2488                 const struct trace_params *tp;
2489
2490                 if (!capable(CAP_NET_ADMIN))
2491                         return -EPERM;
2492                 if (!offload_running(adapter))
2493                         return -EAGAIN;
2494                 if (copy_from_user(&t, useraddr, sizeof(t)))
2495                         return -EFAULT;
2496                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2497                         return -EINVAL;
2498
2499                 tp = (const struct trace_params *)&t.sip;
2500                 if (t.config_tx)
2501                         t3_config_trace_filter(adapter, tp, 0,
2502                                                 t.invert_match,
2503                                                 t.trace_tx);
2504                 if (t.config_rx)
2505                         t3_config_trace_filter(adapter, tp, 1,
2506                                                 t.invert_match,
2507                                                 t.trace_rx);
2508                 break;
2509         }
2510         default:
2511                 return -EOPNOTSUPP;
2512         }
2513         return 0;
2514 }
2515
2516 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2517 {
2518         struct mii_ioctl_data *data = if_mii(req);
2519         struct port_info *pi = netdev_priv(dev);
2520         struct adapter *adapter = pi->adapter;
2521
2522         switch (cmd) {
2523         case SIOCGMIIREG:
2524         case SIOCSMIIREG:
2525                 /* Convert phy_id from older PRTAD/DEVAD format */
2526                 if (is_10G(adapter) &&
2527                     !mdio_phy_id_is_c45(data->phy_id) &&
2528                     (data->phy_id & 0x1f00) &&
2529                     !(data->phy_id & 0xe0e0))
2530                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2531                                                        data->phy_id & 0x1f);
2532                 /* FALLTHRU */
2533         case SIOCGMIIPHY:
2534                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2535         case SIOCCHIOCTL:
2536                 return cxgb_extension_ioctl(dev, req->ifr_data);
2537         default:
2538                 return -EOPNOTSUPP;
2539         }
2540 }
2541
2542 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2543 {
2544         struct port_info *pi = netdev_priv(dev);
2545         struct adapter *adapter = pi->adapter;
2546         int ret;
2547
2548         if (new_mtu < 81)       /* accommodate SACK */
2549                 return -EINVAL;
2550         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2551                 return ret;
2552         dev->mtu = new_mtu;
2553         init_port_mtus(adapter);
2554         if (adapter->params.rev == 0 && offload_running(adapter))
2555                 t3_load_mtus(adapter, adapter->params.mtus,
2556                              adapter->params.a_wnd, adapter->params.b_wnd,
2557                              adapter->port[0]->mtu);
2558         return 0;
2559 }
2560
2561 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2562 {
2563         struct port_info *pi = netdev_priv(dev);
2564         struct adapter *adapter = pi->adapter;
2565         struct sockaddr *addr = p;
2566
2567         if (!is_valid_ether_addr(addr->sa_data))
2568                 return -EADDRNOTAVAIL;
2569
2570         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2571         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2572         if (offload_running(adapter))
2573                 write_smt_entry(adapter, pi->port_id);
2574         return 0;
2575 }
2576
2577 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2578         netdev_features_t features)
2579 {
2580         /*
2581          * Since there is no support for separate rx/tx vlan accel
2582          * enable/disable make sure tx flag is always in same state as rx.
2583          */
2584         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2585                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2586         else
2587                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2588
2589         return features;
2590 }
2591
2592 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2593 {
2594         netdev_features_t changed = dev->features ^ features;
2595
2596         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2597                 cxgb_vlan_mode(dev, features);
2598
2599         return 0;
2600 }
2601
2602 #ifdef CONFIG_NET_POLL_CONTROLLER
2603 static void cxgb_netpoll(struct net_device *dev)
2604 {
2605         struct port_info *pi = netdev_priv(dev);
2606         struct adapter *adapter = pi->adapter;
2607         int qidx;
2608
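             /* service each of this port's qsets by calling its IRQ handler directly */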
2609         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2610                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2611                 void *source;
2612
2613                 if (adapter->flags & USING_MSIX)
2614                         source = qs;
2615                 else
2616                         source = adapter;
2617
2618                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2619         }
2620 }
2621 #endif
2622
2623 /*
2624  * Periodic accumulation of MAC statistics.
2625  */
2626 static void mac_stats_update(struct adapter *adapter)
2627 {
2628         int i;
2629
2630         for_each_port(adapter, i) {
2631                 struct net_device *dev = adapter->port[i];
2632                 struct port_info *p = netdev_priv(dev);
2633
2634                 if (netif_running(dev)) {
2635                         spin_lock(&adapter->stats_lock);
2636                         t3_mac_update_stats(&p->mac);
2637                         spin_unlock(&adapter->stats_lock);
2638                 }
2639         }
2640 }
2641
2642 static void check_link_status(struct adapter *adapter)
2643 {
2644         int i;
2645
2646         for_each_port(adapter, i) {
2647                 struct net_device *dev = adapter->port[i];
2648                 struct port_info *p = netdev_priv(dev);
2649                 int link_fault;
2650
2651                 spin_lock_irq(&adapter->work_lock);
2652                 link_fault = p->link_fault;
2653                 spin_unlock_irq(&adapter->work_lock);
2654
2655                 if (link_fault) {
2656                         t3_link_fault(adapter, i);
2657                         continue;
2658                 }
2659
2660                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2661                         t3_xgm_intr_disable(adapter, i);
2662                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2663
2664                         t3_link_changed(adapter, i);
2665                         t3_xgm_intr_enable(adapter, i);
2666                 }
2667         }
2668 }
2669
2670 static void check_t3b2_mac(struct adapter *adapter)
2671 {
2672         int i;
2673
2674         if (!rtnl_trylock())    /* synchronize with ifdown */
2675                 return;
2676
2677         for_each_port(adapter, i) {
2678                 struct net_device *dev = adapter->port[i];
2679                 struct port_info *p = netdev_priv(dev);
2680                 int status;
2681
2682                 if (!netif_running(dev))
2683                         continue;
2684
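                     /*
                      * Watchdog status: 1 means the MAC TX enable was toggled,
                      * 2 means the MAC was reset and must be reconfigured.
                      */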
2685                 status = 0;
2686                 if (netif_carrier_ok(dev))
2687                         status = t3b2_mac_watchdog_task(&p->mac);
2688                 if (status == 1)
2689                         p->mac.stats.num_toggled++;
2690                 else if (status == 2) {
2691                         struct cmac *mac = &p->mac;
2692
2693                         t3_mac_set_mtu(mac, dev->mtu);
2694                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2695                         cxgb_set_rxmode(dev);
2696                         t3_link_start(&p->phy, mac, &p->link_config);
2697                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2698                         t3_port_intr_enable(adapter, p->port_id);
2699                         p->mac.stats.num_resets++;
2700                 }
2701         }
2702         rtnl_unlock();
2703 }
2704
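     /*
      * Periodic adapter maintenance: check link state, accumulate MAC stats,
      * run the T3B2 MAC watchdog, and count the interrupt conditions that
      * are polled rather than interrupt-driven.
      */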
2706 static void t3_adap_check_task(struct work_struct *work)
2707 {
2708         struct adapter *adapter = container_of(work, struct adapter,
2709                                                adap_check_task.work);
2710         const struct adapter_params *p = &adapter->params;
2711         int port;
2712         unsigned int v, status, reset;
2713
2714         adapter->check_task_cnt++;
2715
2716         check_link_status(adapter);
2717
2718         /* Accumulate MAC stats if needed */
2719         if (!p->linkpoll_period ||
2720             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2721             p->stats_update_period) {
2722                 mac_stats_update(adapter);
2723                 adapter->check_task_cnt = 0;
2724         }
2725
2726         if (p->rev == T3_REV_B2)
2727                 check_t3b2_mac(adapter);
2728
2729         /*
2730          * Scan the XGMACs for conditions which we want to monitor by
2731          * periodic polling rather than via an interrupt, because they
2732          * would otherwise flood the system with interrupts and we only
2733          * really need to know that they are "happening".  For each
2734          * condition we count its detection and reset it for the next
2735          * polling loop.
2736          */
2737         for_each_port(adapter, port) {
2738                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2739                 u32 cause;
2740
2741                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2742                 reset = 0;
2743                 if (cause & F_RXFIFO_OVERFLOW) {
2744                         mac->stats.rx_fifo_ovfl++;
2745                         reset |= F_RXFIFO_OVERFLOW;
2746                 }
2747
2748                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2749         }
2750
2751         /*
2752          * We do the same as above for FL_EMPTY interrupts.
2753          */
2754         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2755         reset = 0;
2756
2757         if (status & F_FLEMPTY) {
2758                 struct sge_qset *qs = &adapter->sge.qs[0];
2759                 int i = 0;
2760
2761                 reset |= F_FLEMPTY;
2762
2763                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2764                     0xffff;
2765
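                     /*
                      * The FL status bits alternate FL0/FL1 for each qset, so
                      * advance to the next qset after consuming an FL1 bit.
                      */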
2766                 while (v) {
2767                         qs->fl[i].empty += (v & 1);
2768                         if (i)
2769                                 qs++;
2770                         i ^= 1;
2771                         v >>= 1;
2772                 }
2773         }
2774
2775         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2776
2777         /* Schedule the next check update if any port is active. */
2778         spin_lock_irq(&adapter->work_lock);
2779         if (adapter->open_device_map & PORT_MASK)
2780                 schedule_chk_task(adapter);
2781         spin_unlock_irq(&adapter->work_lock);
2782 }
2783
2784 static void db_full_task(struct work_struct *work)
2785 {
2786         struct adapter *adapter = container_of(work, struct adapter,
2787                                                db_full_task);
2788
2789         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2790 }
2791
2792 static void db_empty_task(struct work_struct *work)
2793 {
2794         struct adapter *adapter = container_of(work, struct adapter,
2795                                                db_empty_task);
2796
2797         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2798 }
2799
2800 static void db_drop_task(struct work_struct *work)
2801 {
2802         struct adapter *adapter = container_of(work, struct adapter,
2803                                                db_drop_task);
2804         unsigned long delay = 1000;
2805         unsigned short r;
2806
2807         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2808
2809         /*
2810          * Sleep a while before ringing the driver qset doorbells.
2811          * The delay is between 1000 and 2023 usecs.
2812          */
2813         get_random_bytes(&r, 2);
2814         delay += r & 1023;
2815         set_current_state(TASK_UNINTERRUPTIBLE);
2816         schedule_timeout(usecs_to_jiffies(delay));
2817         ring_dbs(adapter);
2818 }
2819
2820 /*
2821  * Processes external (PHY) interrupts in process context.
2822  */
2823 static void ext_intr_task(struct work_struct *work)
2824 {
2825         struct adapter *adapter = container_of(work, struct adapter,
2826                                                ext_intr_handler_task);
2827         int i;
2828
2829         /* Disable link fault interrupts */
2830         for_each_port(adapter, i) {
2831                 struct net_device *dev = adapter->port[i];
2832                 struct port_info *p = netdev_priv(dev);
2833
2834                 t3_xgm_intr_disable(adapter, i);
2835                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2836         }
2837
2838         t3_phy_intr_handler(adapter);
2839
2840         /* Re-enable link fault interrupts */
2841         for_each_port(adapter, i)
2842                 t3_xgm_intr_enable(adapter, i);
2843
2844         /* Now reenable external interrupts */
2845         spin_lock_irq(&adapter->work_lock);
2846         if (adapter->slow_intr_mask) {
2847                 adapter->slow_intr_mask |= F_T3DBG;
2848                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2849                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2850                              adapter->slow_intr_mask);
2851         }
2852         spin_unlock_irq(&adapter->work_lock);
2853 }
2854
2855 /*
2856  * Interrupt-context handler for external (PHY) interrupts.
2857  */
2858 void t3_os_ext_intr_handler(struct adapter *adapter)
2859 {
2860         /*
2861          * Schedule a task to handle external interrupts as they may be slow
2862          * and we use a mutex to protect MDIO registers.  We disable PHY
2863          * interrupts in the meantime and let the task reenable them when
2864          * it's done.
2865          */
2866         spin_lock(&adapter->work_lock);
2867         if (adapter->slow_intr_mask) {
2868                 adapter->slow_intr_mask &= ~F_T3DBG;
2869                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2870                              adapter->slow_intr_mask);
2871                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2872         }
2873         spin_unlock(&adapter->work_lock);
2874 }
2875
2876 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2877 {
2878         struct net_device *netdev = adapter->port[port_id];
2879         struct port_info *pi = netdev_priv(netdev);
2880
2881         spin_lock(&adapter->work_lock);
2882         pi->link_fault = 1;
2883         spin_unlock(&adapter->work_lock);
2884 }
2885
2886 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2887 {
2888         int i, ret = 0;
2889
2890         if (is_offload(adapter) &&
2891             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2892                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2893                 offload_close(&adapter->tdev);
2894         }
2895
2896         /* Stop all ports */
2897         for_each_port(adapter, i) {
2898                 struct net_device *netdev = adapter->port[i];
2899
2900                 if (netif_running(netdev))
2901                         __cxgb_close(netdev, on_wq);
2902         }
2903
2904         /* Stop SGE timers */
2905         t3_stop_sge_timers(adapter);
2906
2907         adapter->flags &= ~FULL_INIT_DONE;
2908
2909         if (reset)
2910                 ret = t3_reset_adapter(adapter);
2911
2912         pci_disable_device(adapter->pdev);
2913
2914         return ret;
2915 }
2916
2917 static int t3_reenable_adapter(struct adapter *adapter)
2918 {
2919         if (pci_enable_device(adapter->pdev)) {
2920                 dev_err(&adapter->pdev->dev,
2921                         "Cannot re-enable PCI device after reset.\n");
2922                 goto err;
2923         }
2924         pci_set_master(adapter->pdev);
2925         pci_restore_state(adapter->pdev);
2926         pci_save_state(adapter->pdev);
2927
2928         /* Free sge resources */
2929         t3_free_sge_resources(adapter);
2930
2931         if (t3_replay_prep_adapter(adapter))
2932                 goto err;
2933
2934         return 0;
2935 err:
2936         return -1;
2937 }
2938
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Process a fatal error: bring the ports down, reset the chip, then bring
 * the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

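/*
 * Entry point for fatal errors.  Stop the SGE and both MACs, disable
 * interrupts, defer the reset to fatal_error_task, and log the firmware
 * status registers to aid post-mortem analysis.
 */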
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of queue sets based on the number of CPUs and the number
 * of ports, not to exceed the number of available queue sets, assuming
 * there are enough queue sets per port in hardware.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else {
		nqsets = 1;
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

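/*
 * Try to switch the adapter to MSI-X.  We need at least one vector per
 * port plus one for the slow path; on success the allocated vectors are
 * recorded in adap->msix_info.
 */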
static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

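/*
 * Print a one-line summary for each registered port: adapter and PHY
 * descriptions, chip revision, bus type and interrupt mode, plus the
 * memory sizes and serial number once per adapter.
 */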
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		/* adap->name aliases the name of the first registered port */
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= cxgb_set_rxmode,
	.ndo_do_ioctl		= cxgb_ioctl,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_fix_features	= cxgb_fix_features,
	.ndo_set_features	= cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};

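/*
 * Derive the port's iSCSI MAC address from its Ethernet MAC by setting
 * the top bit of the fourth address byte.
 */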
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
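/*
 * PCI probe routine: enable and map the device, allocate the adapter and
 * one net device per port, register the ports, and finally pick the
 * interrupt mode and distribute the queue sets.
 */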
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	/* Try for 64-bit DMA first, then fall back to a 32-bit mask. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	/* Pre-allocated skb reserved for messages that must not fail. */
	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter_nofail;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err) {
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		} else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready.  Reflect it on the LEDs. */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	/* The sysfs attributes are nice to have but not essential. */
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err)
		dev_warn(&pdev->dev, "cannot create sysfs group\n");

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter_nofail:
	kfree_skb(adapter->nofail_skb);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}

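/*
 * PCI remove routine: tear down in roughly the reverse order of init_one.
 * Quiesce the hardware, detach the offload layer, unregister and free the
 * net devices, then release the PCI resources.
 */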
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);	/* kfree_skb() is NULL-safe */
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);